Diffstat (limited to 'src/backends')
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp             |  1
-rw-r--r--  src/backends/backendsCommon/test/OptimizationViewsTests.cpp |  4
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp  | 60
-rw-r--r--  src/backends/cl/test/ClFallbackTests.cpp                    | 12
-rw-r--r--  src/backends/cl/test/ClOptimizedNetworkTests.cpp            | 18
-rw-r--r--  src/backends/neon/test/NeonFallbackTests.cpp                | 27
-rw-r--r--  src/backends/neon/test/NeonOptimizedNetworkTests.cpp        |  9
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp            |  8
-rw-r--r--  src/backends/reference/test/RefOptimizedNetworkTests.cpp    | 19
9 files changed, 85 insertions, 73 deletions
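
Note: the per-file diffs below all follow the same pattern: test code that previously reached the optimised graph by downcasting IOptimizedNetwork to the concrete OptimizedNetwork class now goes through the GetGraphForTesting()/GetModelOptionsForTesting() test helpers. A minimal usage sketch of the new access path, with the helper signatures inferred from the call sites in this patch (their declarations are not part of this diff):

    // Sketch only; GetGraphForTesting/GetModelOptionsForTesting are assumed to be
    // the test helpers whose call sites appear in the hunks below.
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    armnn::Graph& graph    = GetGraphForTesting(optNet.get());        // was: PolymorphicDowncast<OptimizedNetwork*>(optNet.get())->GetGraph()
    auto modelOptionsOut   = GetModelOptionsForTesting(optNet.get()); // was: ...->GetModelOptions()
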
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6ab6d2c8ac..13fd190ea2 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -7,6 +7,7 @@
#include <armnn/TensorFwd.hpp>
#include <armnn/Optional.hpp>
#include <armnn/backends/ITensorHandle.hpp>
+#include <armnn/INetwork.hpp>
#include <backendsCommon/Workload.hpp>
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index c972b4b15f..b472a0321d 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -212,8 +212,8 @@ BOOST_AUTO_TEST_CASE(OptimizeViewsValidateDeviceMockBackend)
BOOST_CHECK(optNet);
// Check the optimised graph
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- CheckLayers(optNetObjPtr->GetGraph());
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ CheckLayers(graph);
}
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 721dfb004c..66d166fc08 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -15,12 +15,13 @@ BOOST_AUTO_TEST_SUITE(OptimizedNetwork)
BOOST_AUTO_TEST_CASE(SerializeToDot)
{
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
//Defines layers.
- auto input = net.AddInputLayer(0);
- auto add = net.AddAdditionLayer();
- auto output = net.AddOutputLayer(0);
+ auto input = net->AddInputLayer(0);
+ auto add = net->AddAdditionLayer();
+ auto output = net->AddOutputLayer(0);
// Connects layers.
input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -36,7 +37,7 @@ BOOST_AUTO_TEST_CASE(SerializeToDot)
armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
std::ostringstream ss;
optimizedNet->SerializeToDot(ss);
@@ -127,7 +128,10 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
BOOST_REQUIRE(optNet);
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
+ for (auto&& layer : graph)
{
// If NEON is enabled, Input and Output layers are supported by CpuAcc,
// the other layers are supported by CpuRef.
@@ -151,7 +155,8 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
{
const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
armnn::NormalizationDescriptor nmDesc;
armnn::ActivationDescriptor acDesc;
@@ -167,21 +172,21 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
// sm
// |
// ot
- armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+ armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
layer->GetOutputSlot(0).SetTensorInfo(desc);
- armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+ armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
normLayer->GetOutputSlot(0).SetTensorInfo(desc);
- layer = net.AddActivationLayer(acDesc, "ac");
+ layer = net->AddActivationLayer(acDesc, "ac");
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
armnn::IConnectableLayer* prevLayer = layer;
- layer = net.AddMultiplicationLayer("ml");
+ layer = net->AddMultiplicationLayer("ml");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -189,13 +194,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
prevLayer = layer;
armnn::SoftmaxDescriptor softmaxDescriptor;
- layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+ layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
prevLayer = layer;
- layer = net.AddOutputLayer(0, "ot");
+ layer = net->AddOutputLayer(0, "ot");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -207,7 +212,7 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
try
{
- Optimize(net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+ Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
BOOST_FAIL("Should have thrown an exception.");
}
catch (const armnn::InvalidArgumentException& e)
@@ -221,7 +226,8 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
{
const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
- armnn::Network net;
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
armnn::NormalizationDescriptor nmDesc;
armnn::ActivationDescriptor acDesc;
@@ -237,21 +243,21 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
// sm
// |
// ot
- armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+ armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
layer->GetOutputSlot(0).SetTensorInfo(desc);
- armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+ armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
normLayer->GetOutputSlot(0).SetTensorInfo(desc);
- layer = net.AddActivationLayer(acDesc, "ac");
+ layer = net->AddActivationLayer(acDesc, "ac");
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
armnn::IConnectableLayer* prevLayer = layer;
- layer = net.AddMultiplicationLayer("ml");
+ layer = net->AddMultiplicationLayer("ml");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -259,13 +265,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
prevLayer = layer;
armnn::SoftmaxDescriptor softmaxDescriptor;
- layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+ layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
layer->GetOutputSlot(0).SetTensorInfo(desc);
prevLayer = layer;
- layer = net.AddOutputLayer(0, "ot");
+ layer = net->AddOutputLayer(0, "ot");
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
@@ -274,12 +280,15 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback
std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
- armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
BOOST_CHECK(optNet);
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
// validate workloads
armnn::RefWorkloadFactory fact;
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
BOOST_CHECK_NO_THROW(
@@ -316,7 +325,10 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
BOOST_REQUIRE(optNet);
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
+ for (auto&& layer : graph)
{
// If NEON is enabled, Input and Output layers are supported by CpuAcc,
// the other layers are supported by CpuRef.
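
Note: alongside the graph-access change, OptimizedNetworkTests.cpp stops constructing armnn::Network by value; networks are now built through the INetwork factory, so layer creation moves from net.AddXxx() to net->AddXxx() and Optimize() is passed *net. A condensed sketch of the new test set-up, assembled from the hunks above (not a complete test, wiring shortened for the sketch):

    // Condensed from the hunks above.
    armnn::INetworkPtr net(armnn::INetwork::Create());
    armnn::TensorInfo desc({ 3, 5 }, armnn::DataType::Float32);

    armnn::IConnectableLayer* input = net->AddInputLayer(0, "in");
    input->GetOutputSlot(0).SetTensorInfo(desc);
    armnn::IConnectableLayer* output = net->AddOutputLayer(0, "ot");
    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());

    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();   // the tests above now allocate buffers before iterating the graph
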
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 5885cbe8ef..4384ae5fec 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -51,8 +51,7 @@ BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -177,8 +176,7 @@ BOOST_AUTO_TEST_CASE(ClImportDisabledFallbackToNeon)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -307,8 +305,7 @@ BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -445,8 +442,7 @@ BOOST_AUTO_TEST_CASE(ClImportDisableFallbackSubgraphToNeon)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index dddc5aa8bc..a41c5f87e9 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -39,7 +39,9 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
// validate workloads
armnn::ClWorkloadFactory fact =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+
+ const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
+ for (auto&& layer : theGraph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
BOOST_CHECK_NO_THROW(
@@ -59,17 +61,17 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
// if there are inverse conversion layers remove them with optimization
// at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
// and inverse conversion layers are removed by the optimizer
- armnn::Network net;
+ armnn::INetworkPtr net(armnn::INetwork::Create());
// Defines layers.
- auto input = net.AddInputLayer(0, "input layer");
+ auto input = net->AddInputLayer(0, "input layer");
// ReLu1
armnn::ActivationDescriptor activation1Descriptor;
activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
activation1Descriptor.m_A = 1.f;
activation1Descriptor.m_B = -1.f;
- auto activation = net.AddActivationLayer(activation1Descriptor, "activation layer");
- auto output = net.AddOutputLayer(0, "output layer");
+ auto activation = net->AddActivationLayer(activation1Descriptor, "activation layer");
+ auto output = net->AddOutputLayer(0, "output layer");
// Connects layers.
input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
@@ -89,9 +91,9 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
optimizerOptions.m_ReduceFp32ToFp16 = true;
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
- net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+ *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
- const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
// Tests that all layers are present in the graph.
BOOST_TEST(graph.GetNumLayers() == 5);
@@ -127,7 +129,7 @@ BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
BOOST_CHECK(optimizedNet);
- auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+ auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
BOOST_TEST(modelOptionsOut.size() == 1);
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index fd7fbbc4d5..2d70cc2b1b 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -62,8 +62,7 @@ BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -200,8 +199,7 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -331,8 +329,7 @@ BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -469,8 +466,7 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -598,8 +594,7 @@ BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -723,8 +718,7 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -849,8 +843,7 @@ BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -979,8 +972,7 @@ BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
optOptions.m_ImportEnabled = true;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -1121,8 +1113,7 @@ BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
OptimizerOptions optOptions;
IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
- OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
- Graph& graph = optNetObjPtr->GetGraph();
+ Graph& graph = GetGraphForTesting(optNet.get());
armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 85f06174c7..4944c31d71 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -35,7 +35,8 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
armnn::NeonWorkloadFactory fact =
NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
BOOST_CHECK_NO_THROW(
@@ -103,7 +104,7 @@ BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
BOOST_CHECK(optimizedNet);
- auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+ auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
BOOST_TEST(modelOptionsOut.size() == 1);
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
@@ -134,8 +135,10 @@ BOOST_AUTO_TEST_CASE(NumberOfThreadsTestOnCpuAcc)
*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
BOOST_CHECK(optimizedNet);
+ std::unique_ptr<armnn::Graph> graphPtr;
+ armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
- auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+ auto modelOptionsOut = impl.GetModelOptions();
BOOST_TEST(modelOptionsOut.size() == 1);
BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
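
Note: taken together, the call sites in these test files imply helper declarations roughly like the ones below. This is a sketch; the header that actually declares them is not part of this diff.

    // Hypothetical declarations inferred from the call sites in this patch;
    // the return type of the model-options helper (value vs. reference) is an assumption.
    armnn::Graph& GetGraphForTesting(armnn::IOptimizedNetwork* optNet);
    armnn::ModelOptions GetModelOptionsForTesting(armnn::IOptimizedNetwork* optNet);
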
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index e6d740280d..0e24e9505b 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -128,7 +128,7 @@ BOOST_AUTO_TEST_CASE(ConcatOnXorYSubTensorsNoPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
@@ -211,7 +211,7 @@ BOOST_AUTO_TEST_CASE(ConcatonXorYPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
@@ -380,7 +380,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
@@ -555,7 +555,7 @@ BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
// Load graph into runtime
armnn::NetworkId networkIdentifier;
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 16ff202f70..086c1e471a 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -71,12 +71,13 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
BOOST_CHECK(optNet);
// Validates workloads.
armnn::RefWorkloadFactory fact;
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ for (auto&& layer : graph)
{
BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
}
@@ -109,7 +110,10 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
// optimize the network
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
@@ -141,8 +145,9 @@ BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
// optimize the network
armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
- for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ armnn::Graph& graph = GetGraphForTesting(optNet.get());
+ graph.AllocateDynamicBuffers();
+ for (auto&& layer : graph)
{
BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
}
@@ -183,7 +188,9 @@ BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef)
armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
optimizerOptions);
- const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+ armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
+ graph.AllocateDynamicBuffers();
+
// Tests that all layers are present in the graph.
BOOST_TEST(graph.GetNumLayers() == 5);