ArmNN
 21.02
NeonOptimizedNetworkTests.cpp File Reference
#include "NeonWorkloadFactoryHelper.hpp"
#include <Graph.hpp>
#include <Network.hpp>
#include <neon/NeonWorkloadFactory.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
 
 BOOST_AUTO_TEST_CASE (OptimizeValidateDeviceNonSupportLayerNoFallback)
 
 BOOST_AUTO_TEST_CASE (FastMathEnabledTestOnCpuAcc)
 
 BOOST_AUTO_TEST_CASE (NumberOfThreadsTestOnCpuAcc)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/4]

BOOST_AUTO_TEST_CASE ( OptimizeValidateCpuAccDeviceSupportLayerNoFallback  )

Definition at line 17 of file NeonOptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuAcc, IRuntime::Create(), INetwork::Create(), armnn::Float32, armnn::GetGraphForTesting(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), and IOutputSlot::SetTensorInfo().

18 {
19  // build up the structure of the network
21 
22  armnn::IConnectableLayer* input = net->AddInputLayer(0);
23  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
24 
25  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
27 
30 
31  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
32  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
33  BOOST_CHECK(optNet);
34  // validate workloads
36  NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
37 
38  armnn::Graph& graph = GetGraphForTesting(optNet.get());
39  for (auto&& layer : graph)
40  {
41  BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
42  BOOST_CHECK_NO_THROW(
43  layer->CreateWorkload(fact));
44  }
45 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:25
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ BOOST_AUTO_TEST_CASE() [2/4]

BOOST_AUTO_TEST_CASE ( OptimizeValidateDeviceNonSupportLayerNoFallback  )

Definition at line 47 of file NeonOptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuAcc, IRuntime::Create(), INetwork::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::Optimize(), and IOutputSlot::SetTensorInfo().

48 {
49  // build up the structure of the network
51 
52  armnn::IConnectableLayer* input = net->AddInputLayer(0);
53 
54  // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will throw an exception.
56  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
57 
58  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
59 
60  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
61  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
62 
65 
68 
69  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
70  std::vector<std::string> errMessages;
71 
72  try
73  {
74  Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
75  BOOST_FAIL("Should have thrown an exception.");
76  }
77  catch (const armnn::InvalidArgumentException& e)
78  {
79  // Different exceptions are thrown on different backends
80  }
81  BOOST_CHECK(errMessages.size() > 0);
82 }
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ BOOST_AUTO_TEST_CASE() [3/4]

BOOST_AUTO_TEST_CASE ( FastMathEnabledTestOnCpuAcc  )

Definition at line 84 of file NeonOptimizedNetworkTests.cpp.

References IOutputSlot::Connect(), armnn::CpuAcc, IRuntime::Create(), INetwork::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), armnn::GetModelOptionsForTesting(), IConnectableLayer::GetOutputSlot(), OptimizerOptions::m_ModelOptions, armnn::Optimize(), and IOutputSlot::SetTensorInfo().

85 {
87 
88  armnn::IConnectableLayer* input = net->AddInputLayer(0);
89  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
90 
91  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
93 
96 
97  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
98  armnn::OptimizerOptions optimizerOptions;
99  armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
100  optimizerOptions.m_ModelOptions.push_back(modelOptions);
101 
103  *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
104 
105  BOOST_CHECK(optimizedNet);
106 
107  auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
108 
109  BOOST_TEST(modelOptionsOut.size() == 1);
110  BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
111  BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
112 }
ModelOptions m_ModelOptions
Definition: INetwork.hpp:168
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
Struct for the users to pass backend specific options.
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
ModelOptions & GetModelOptionsForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:30
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510

◆ BOOST_AUTO_TEST_CASE() [4/4]

BOOST_AUTO_TEST_CASE ( NumberOfThreadsTestOnCpuAcc  )

Definition at line 114 of file NeonOptimizedNetworkTests.cpp.

References BOOST_AUTO_TEST_SUITE_END(), IOutputSlot::Connect(), armnn::CpuAcc, IRuntime::Create(), INetwork::Create(), armnn::Float32, IConnectableLayer::GetInputSlot(), OptimizedNetworkImpl::GetModelOptions(), IConnectableLayer::GetOutputSlot(), OptimizerOptions::m_ModelOptions, armnn::Optimize(), and IOutputSlot::SetTensorInfo().

115 {
117 
118  armnn::IConnectableLayer* input = net->AddInputLayer(0);
119  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
120 
121  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
123 
126 
127  unsigned int numberOfThreads = 2;
128 
129  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
130  armnn::OptimizerOptions optimizerOptions;
131  armnn::BackendOptions modelOptions("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
132  optimizerOptions.m_ModelOptions.push_back(modelOptions);
133 
135  *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
136 
137  BOOST_CHECK(optimizedNet);
138  std::unique_ptr<armnn::Graph> graphPtr;
139  armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
140 
141  auto modelOptionsOut = impl.GetModelOptions();
142 
143  BOOST_TEST(modelOptionsOut.size() == 1);
144  BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
145  BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
146 }
ModelOptions m_ModelOptions
Definition: INetwork.hpp:168
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:37
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
Struct for the users to pass backend specific options.
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:510