author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>    2018-10-22 13:32:01 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>        2018-10-22 16:57:54 +0100
commit     c26ba759fe67bd14829a84b5abac80f51ca61946 (patch)
tree       0f5696428a899d7734b4264cec6395a8b1fce126
parent     5610661023a4170e677027570db18a379842dac3 (diff)
download   armnn-c26ba759fe67bd14829a84b5abac80f51ca61946.tar.gz
IVGCVSW-2060: Separate and move backend-specific unit tests from the src/armnn/test folder to the backends

* Moved backend-specific memory leak checking tests from RuntimeTests.cpp to the corresponding backend test folder

Change-Id: I0a7f4ef52c5350c3cebca23b2b4e61a9446ca11f
-rw-r--r--  CMakeLists.txt                                      1
-rw-r--r--  src/armnn/test/RuntimeTests.cpp                   283
-rw-r--r--  src/armnn/test/RuntimeTests.hpp                    14
-rw-r--r--  src/backends/cl/backend.mk                          1
-rw-r--r--  src/backends/cl/test/CMakeLists.txt                 1
-rw-r--r--  src/backends/cl/test/ClRuntimeTests.cpp           151
-rw-r--r--  src/backends/neon/backend.mk                        1
-rw-r--r--  src/backends/neon/test/CMakeLists.txt               1
-rw-r--r--  src/backends/neon/test/NeonRuntimeTests.cpp        68
-rw-r--r--  src/backends/reference/backend.mk                   3
-rw-r--r--  src/backends/reference/test/CMakeLists.txt          1
-rw-r--r--  src/backends/reference/test/RefRuntimeTests.cpp    46
-rw-r--r--  src/backends/test/RuntimeTestImpl.hpp              42
13 files changed, 342 insertions(+), 271 deletions(-)
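
For orientation, the relocated tests all follow the same shape. The sketch below is condensed from the new ClRuntimeTests.cpp added further down and is illustrative only; the suite name, BackendId and leak-checker scope label differ per backend, and it is not itself part of the commit.

// Sketch only: condensed from ClRuntimeTests.cpp in the diff below.
#include <armnn/test/RuntimeTests.hpp>       // declares armnn::RuntimeLoadedNetworksReserve
#include <armnnUtils/LeakChecking.hpp>       // ARMNN_* leak-checking macros
#include <backends/test/RuntimeTestImpl.hpp> // shared CreateAndDropDummyNetwork helper
#include <boost/test/unit_test.hpp>

BOOST_AUTO_TEST_SUITE(ClRuntime)

#ifdef ARMNN_LEAK_CHECKING_ENABLED
BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
{
    armnn::IRuntime::CreationOptions options;
    armnn::Runtime runtime(options);
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };

    // Warm-up run so that one-time initialisation is not reported as a leak.
    CreateAndDropDummyNetwork(backends, runtime);

    {
        ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
        CreateAndDropDummyNetwork(backends, runtime);
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}
#endif // ARMNN_LEAK_CHECKING_ENABLED

BOOST_AUTO_TEST_SUITE_END()

The dummy-network builder itself is shared across backends via the new src/backends/test/RuntimeTestImpl.hpp, so each backend test file only supplies its own BackendId and test suite.
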
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e312b36b77..fab3d9d8b3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -342,6 +342,7 @@ if(BUILD_UNIT_TESTS)
src/armnn/test/OptimizerTests.cpp
src/armnn/test/ProfilerTests.cpp
src/armnn/test/RuntimeTests.cpp
+ src/armnn/test/RuntimeTests.hpp
src/armnn/test/CreateWorkload.hpp
src/armnn/test/TensorTest.cpp
src/armnn/test/TensorHelpers.hpp
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 76f5774a49..8a4e85ee23 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -2,21 +2,22 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
-#include "armnn/TypesUtils.hpp"
+#include <armnn/Descriptors.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/Runtime.hpp>
+#include <armnn/TypesUtils.hpp>
-#include "armnn/IRuntime.hpp"
-#include "armnn/INetwork.hpp"
-#include "armnn/Descriptors.hpp"
-#include "Runtime.hpp"
-#include "HeapProfiling.hpp"
-#include "LeakChecking.hpp"
+#include <armnnUtils/HeapProfiling.hpp>
+#include <armnnUtils/LeakChecking.hpp>
#ifdef WITH_VALGRIND
-#include "valgrind/memcheck.h"
+#include <valgrind/memcheck.h>
#endif
+#include <boost/test/unit_test.hpp>
+
namespace armnn
{
@@ -71,35 +72,6 @@ struct DisableGlobalLeakChecking
BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
-void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
-{
- armnn::NetworkId networkIdentifier;
- {
- armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
- armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
-
- armnn::INetworkPtr network(armnn::INetwork::Create());
-
- armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
- armnn::IConnectableLayer* layer = network->AddActivationLayer(armnn::ActivationDescriptor(), "test");
- armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
-
- input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
- layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- // Sets the tensors in the network.
- input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- // optimize the network
- armnn::IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime.GetDeviceSpec());
-
- runtime.LoadNetwork(networkIdentifier, std::move(optNet));
- }
-
- runtime.UnloadNetwork(networkIdentifier);
-}
-
BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
{
BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
@@ -121,173 +93,9 @@ BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
}
}
-#ifdef ARMCOMPUTECL_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
-{
- BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
- armnn::IRuntime::CreationOptions options;
- armnn::Runtime runtime(options);
- armnn::RuntimeLoadedNetworksReserve(&runtime);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- {
- // Do a warmup of this so we make sure that all one-time
- // initialization happens before we do the leak checking.
- CreateAndDropDummyNetwork(backends, runtime);
- }
-
- {
- ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- // In the second run we check for all remaining memory
- // in use after the network was unloaded. If there is any
- // then it will be treated as a memory leak.
- CreateAndDropDummyNetwork(backends, runtime);
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
- BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
- }
-}
-#endif // ARMCOMPUTECL_ENABLED
-
-#ifdef ARMCOMPUTENEON_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
-{
- BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
- armnn::IRuntime::CreationOptions options;
- armnn::Runtime runtime(options);
- armnn::RuntimeLoadedNetworksReserve(&runtime);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- {
- // Do a warmup of this so we make sure that all one-time
- // initialization happens before we do the leak checking.
- CreateAndDropDummyNetwork(backends, runtime);
- }
-
- {
- ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- // In the second run we check for all remaining memory
- // in use after the network was unloaded. If there is any
- // then it will be treated as a memory leak.
- CreateAndDropDummyNetwork(backends, runtime);
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
- BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
- }
-}
-#endif // ARMCOMPUTENEON_ENABLED
-
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
-{
- BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
-
- armnn::IRuntime::CreationOptions options;
- armnn::Runtime runtime(options);
- armnn::RuntimeLoadedNetworksReserve(&runtime);
-
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- {
- // Do a warmup of this so we make sure that all one-time
- // initialization happens before we do the leak checking.
- CreateAndDropDummyNetwork(backends, runtime);
- }
-
- {
- ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef");
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- // In the second run we check for all remaining memory
- // in use after the network was unloaded. If there is any
- // then it will be treated as a memory leak.
- CreateAndDropDummyNetwork(backends, runtime);
- BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
- BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
- BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
- }
-}
-
#endif // ARMNN_LEAK_CHECKING_ENABLED
// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
-#if defined(ARMCOMPUTECL_ENABLED) && defined(WITH_VALGRIND)
-BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
-{
- // From documentation:
-
- // This means that no pointer to the block can be found. The block is classified as "lost",
- // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
- unsigned long leakedBefore = 0;
- unsigned long leakedAfter = 0;
-
- // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
- // the programmer could, at least in principle, have freed it before program exit.
- // We want to test this in case memory is not freed as early as it could have been.
- unsigned long reachableBefore = 0;
- unsigned long reachableAfter = 0;
-
- // Needed as out params but we don't test them.
- unsigned long dubious = 0;
- unsigned long suppressed = 0;
-
- // Ensure that runtime is large enough before checking for memory leaks.
- // Otherwise, when loading the network, it will automatically reserve memory that won't be released
- // until destruction.
- armnn::NetworkId networkIdentifier;
- armnn::IRuntime::CreationOptions options;
- armnn::Runtime runtime(options);
- armnn::RuntimeLoadedNetworksReserve(&runtime);
-
- // Checks for leaks before we load the network and record them so that we can see the delta after unloading.
- VALGRIND_DO_QUICK_LEAK_CHECK;
- VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);
-
- // build a mock-network and load it into the runtime
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- {
- armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
- armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
-
- armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());
-
- armnn::IConnectableLayer* input = mockNetwork->AddInputLayer(0, "input");
- armnn::IConnectableLayer* layer = mockNetwork->AddActivationLayer(armnn::ActivationDescriptor(), "test");
- armnn::IConnectableLayer* output = mockNetwork->AddOutputLayer(0, "output");
-
- input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
- layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- // Sets the tensors in the network.
- input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
- layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- // optimize the network
- armnn::IOptimizedNetworkPtr optNet = Optimize(*mockNetwork, backends, runtime.GetDeviceSpec());
-
- runtime.LoadNetwork(networkIdentifier, std::move(optNet));
- }
-
- runtime.UnloadNetwork(networkIdentifier);
-
- VALGRIND_DO_ADDED_LEAK_CHECK;
- VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
-
- // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
- BOOST_TEST(leakedBefore == leakedAfter);
-
- // Add resonable threshold after and before running valgrind with the ACL clear cache function.
- // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
- // value to 1024 when fixed.
- BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
-
- // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
- // so they are assigned to, but still considered unused, causing a warning.
- boost::ignore_unused(dubious);
- boost::ignore_unused(suppressed);
-}
-#endif
-
-// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
#ifdef WITH_VALGRIND
// Run with the following command to get all the amazing output (in the devenv/build folder) :)
// valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
@@ -341,80 +149,15 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
// If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
- BOOST_TEST(leakedBefore == leakedAfter);
-
- #if defined(ARMCOMPUTECL_ENABLED)
- // reachableBefore == reachableAfter should hold, but on OpenCL with Android we are still
- // not entirely able to control the memory in the OpenCL driver. Testing is showing that
- // after this test (which clears all OpenCL memory) we are clearing a little bit more than
- // we expect, probably depending on the order in which other tests are run.
- BOOST_TEST(reachableBefore - reachableAfter <= 24);
- #else
- BOOST_TEST(reachableBefore == reachableAfter);
- #endif
-
- BOOST_TEST(reachableBefore >= reachableAfter);
+ BOOST_TEST(leakedBefore == leakedAfter);
+ BOOST_TEST(reachableBefore == reachableAfter);
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
boost::ignore_unused(dubious);
boost::ignore_unused(suppressed);
}
-#endif
-
-#if ARMCOMPUTENEON_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
-{
- // build up the structure of the network
- armnn::INetworkPtr net(armnn::INetwork::Create());
-
- armnn::IConnectableLayer* input = net->AddInputLayer(0);
-
- armnn::IConnectableLayer* output = net->AddOutputLayer(0);
-
- input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
- armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- BOOST_CHECK(optNet);
-
- // Load it into the runtime. It should success.
- armnn::NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
-}
-#endif // ARMCOMPUTENEON_ENABLED
-
-#if ARMCOMPUTECL_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
-{
- // build up the structure of the network
- armnn::INetworkPtr net(armnn::INetwork::Create());
-
- armnn::IConnectableLayer* input = net->AddInputLayer(0);
-
- armnn::IConnectableLayer* output = net->AddOutputLayer(0);
-
- input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
-
- input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
-
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
-
- std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
- armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- BOOST_CHECK(optNet);
-
- // Load it into the runtime. It should success.
- armnn::NetworkId netId;
- BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
-}
-#endif // ARMCOMPUTECL_ENABLED
+#endif // WITH_VALGRIND
BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
{
diff --git a/src/armnn/test/RuntimeTests.hpp b/src/armnn/test/RuntimeTests.hpp
new file mode 100644
index 0000000000..ba2a37baea
--- /dev/null
+++ b/src/armnn/test/RuntimeTests.hpp
@@ -0,0 +1,14 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Runtime.hpp>
+
+namespace armnn
+{
+
+void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime);
+
+} // namespace armnn
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index c549c01c28..1f89f3b0a4 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -47,5 +47,6 @@ BACKEND_TEST_SOURCES := \
test/ClLayerSupportTests.cpp \
test/ClLayerTests.cpp \
test/ClMemCopyTests.cpp \
+ test/ClRuntimeTests.cpp \
test/Fp16SupportTest.cpp \
test/OpenClTimerTest.cpp
diff --git a/src/backends/cl/test/CMakeLists.txt b/src/backends/cl/test/CMakeLists.txt
index 262e23a7c1..69aa08d42b 100644
--- a/src/backends/cl/test/CMakeLists.txt
+++ b/src/backends/cl/test/CMakeLists.txt
@@ -9,6 +9,7 @@ list(APPEND armnnClBackendUnitTests_sources
ClLayerSupportTests.cpp
ClLayerTests.cpp
ClMemCopyTests.cpp
+ ClRuntimeTests.cpp
OpenClTimerTest.cpp
)
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
new file mode 100644
index 0000000000..d29cd5b1c2
--- /dev/null
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -0,0 +1,151 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/test/RuntimeTests.hpp>
+
+#include <armnnUtils/LeakChecking.hpp>
+
+#include <backends/test/RuntimeTestImpl.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/unit_test.hpp>
+
+#ifdef WITH_VALGRIND
+#include <valgrind/memcheck.h>
+#endif
+
+BOOST_AUTO_TEST_SUITE(ClRuntime)
+
+BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
+{
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+ BOOST_CHECK(optNet);
+
+ // Load it into the runtime. It should succeed.
+ armnn::NetworkId netId;
+ BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+}
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
+{
+ BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+ armnn::IRuntime::CreationOptions options;
+ armnn::Runtime runtime(options);
+ armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ {
+ // Do a warmup of this so we make sure that all one-time
+ // initialization happens before we do the leak checking.
+ CreateAndDropDummyNetwork(backends, runtime);
+ }
+
+ {
+ ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
+ BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ // In the second run we check for all remaining memory
+ // in use after the network was unloaded. If there is any
+ // then it will be treated as a memory leak.
+ CreateAndDropDummyNetwork(backends, runtime);
+ BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+ BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+ }
+}
+#endif
+
+// Note: this part of the code is due to be removed when we fully trust the gperftools based results.
+#if defined(WITH_VALGRIND)
+BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
+{
+ // From documentation:
+
+ // This means that no pointer to the block can be found. The block is classified as "lost",
+ // because the programmer could not possibly have freed it at program exit, since no pointer to it exists.
+ unsigned long leakedBefore = 0;
+ unsigned long leakedAfter = 0;
+
+ // A start-pointer or chain of start-pointers to the block is found. Since the block is still pointed at,
+ // the programmer could, at least in principle, have freed it before program exit.
+ // We want to test this in case memory is not freed as early as it could have been.
+ unsigned long reachableBefore = 0;
+ unsigned long reachableAfter = 0;
+
+ // Needed as out params but we don't test them.
+ unsigned long dubious = 0;
+ unsigned long suppressed = 0;
+
+ // Ensure that runtime is large enough before checking for memory leaks.
+ // Otherwise, when loading the network, it will automatically reserve memory that won't be released
+ // until destruction.
+ armnn::NetworkId networkIdentifier;
+ armnn::IRuntime::CreationOptions options;
+ armnn::Runtime runtime(options);
+ armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+ // Checks for leaks before we load the network and record them so that we can see the delta after unloading.
+ VALGRIND_DO_QUICK_LEAK_CHECK;
+ VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);
+
+ // build a mock-network and load it into the runtime
+ std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+ {
+ armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+
+ armnn::INetworkPtr mockNetwork(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = mockNetwork->AddInputLayer(0, "input");
+ armnn::IConnectableLayer* layer = mockNetwork->AddActivationLayer(armnn::ActivationDescriptor(), "test");
+ armnn::IConnectableLayer* output = mockNetwork->AddOutputLayer(0, "output");
+
+ input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // optimize the network
+ armnn::IOptimizedNetworkPtr optNet = Optimize(*mockNetwork, backends, runtime.GetDeviceSpec());
+
+ runtime.LoadNetwork(networkIdentifier, std::move(optNet));
+ }
+
+ runtime.UnloadNetwork(networkIdentifier);
+
+ VALGRIND_DO_ADDED_LEAK_CHECK;
+ VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
+
+ // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
+ BOOST_TEST(leakedBefore == leakedAfter);
+
+ // Add reasonable threshold after and before running valgrind with the ACL clear cache function.
+ // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
+ // value to 1024 when fixed.
+ BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
+
+ // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
+ // so they are assigned to, but still considered unused, causing a warning.
+ boost::ignore_unused(dubious);
+ boost::ignore_unused(suppressed);
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 4cab9fbd71..a4e6db9610 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -44,4 +44,5 @@ BACKEND_TEST_SOURCES := \
test/NeonLayerSupportTests.cpp \
test/NeonLayerTests.cpp \
test/NeonMemCopyTests.cpp \
+ test/NeonRuntimeTests.cpp \
test/NeonTimerTest.cpp
diff --git a/src/backends/neon/test/CMakeLists.txt b/src/backends/neon/test/CMakeLists.txt
index 384a5e1749..e6a28590b5 100644
--- a/src/backends/neon/test/CMakeLists.txt
+++ b/src/backends/neon/test/CMakeLists.txt
@@ -8,6 +8,7 @@ list(APPEND armnnNeonBackendUnitTests_sources
NeonLayerSupportTests.cpp
NeonLayerTests.cpp
NeonMemCopyTests.cpp
+ NeonRuntimeTests.cpp
NeonTimerTest.cpp
)
diff --git a/src/backends/neon/test/NeonRuntimeTests.cpp b/src/backends/neon/test/NeonRuntimeTests.cpp
new file mode 100644
index 0000000000..6e6b1e9148
--- /dev/null
+++ b/src/backends/neon/test/NeonRuntimeTests.cpp
@@ -0,0 +1,68 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/test/RuntimeTests.hpp>
+
+#include <armnnUtils/LeakChecking.hpp>
+
+#include <backends/test/RuntimeTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(NeonRuntime)
+
+BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
+{
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+ BOOST_CHECK(optNet);
+
+ // Load it into the runtime. It should succeed.
+ armnn::NetworkId netId;
+ BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+}
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
+{
+ BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+ armnn::IRuntime::CreationOptions options;
+ armnn::Runtime runtime(options);
+ armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+ {
+ // Do a warmup of this so we make sure that all one-time
+ // initialization happens before we do the leak checking.
+ CreateAndDropDummyNetwork(backends, runtime);
+ }
+
+ {
+ ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
+ BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ // In the second run we check for all remaining memory
+ // in use after the network was unloaded. If there is any
+ // then it will be treated as a memory leak.
+ CreateAndDropDummyNetwork(backends, runtime);
+ BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+ BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+ }
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 9ecb6d75b2..455ab4618e 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -66,4 +66,5 @@ BACKEND_SOURCES := \
BACKEND_TEST_SOURCES := \
test/RefCreateWorkloadTests.cpp \
test/RefLayerSupportTests.cpp \
- test/RefLayerTests.cpp
+ test/RefLayerTests.cpp \
+ test/RefRuntimeTests.cpp
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index deee364a9a..dea0ef6498 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -7,6 +7,7 @@ list(APPEND armnnRefBackendUnitTests_sources
RefCreateWorkloadTests.cpp
RefLayerSupportTests.cpp
RefLayerTests.cpp
+ RefRuntimeTests.cpp
)
add_library(armnnRefBackendUnitTests OBJECT ${armnnRefBackendUnitTests_sources})
diff --git a/src/backends/reference/test/RefRuntimeTests.cpp b/src/backends/reference/test/RefRuntimeTests.cpp
new file mode 100644
index 0000000000..2536627ea6
--- /dev/null
+++ b/src/backends/reference/test/RefRuntimeTests.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/test/RuntimeTests.hpp>
+
+#include <armnnUtils/LeakChecking.hpp>
+
+#include <backends/test/RuntimeTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(RefRuntime)
+
+#ifdef ARMNN_LEAK_CHECKING_ENABLED
+BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
+{
+ BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::Runtime runtime(options);
+ armnn::RuntimeLoadedNetworksReserve(&runtime);
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ {
+ // Do a warmup of this so we make sure that all one-time
+ // initialization happens before we do the leak checking.
+ CreateAndDropDummyNetwork(backends, runtime);
+ }
+
+ {
+ ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef");
+ BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ // In the second run we check for all remaining memory
+ // in use after the network was unloaded. If there is any
+ // then it will be treated as a memory leak.
+ CreateAndDropDummyNetwork(backends, runtime);
+ BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+ BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+ BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+ }
+}
+#endif
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/test/RuntimeTestImpl.hpp b/src/backends/test/RuntimeTestImpl.hpp
new file mode 100644
index 0000000000..671f94b0bb
--- /dev/null
+++ b/src/backends/test/RuntimeTestImpl.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Runtime.hpp>
+
+namespace
+{
+
+inline void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
+{
+ armnn::NetworkId networkIdentifier;
+ {
+ armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+
+ armnn::INetworkPtr network(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
+ armnn::IConnectableLayer* layer = network->AddActivationLayer(armnn::ActivationDescriptor(), "test");
+ armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+ input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+ layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // optimize the network
+ armnn::IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime.GetDeviceSpec());
+
+ runtime.LoadNetwork(networkIdentifier, std::move(optNet));
+ }
+
+ runtime.UnloadNetwork(networkIdentifier);
+}
+
+} // anonymous namespace