// ArmNN 21.08 — NeonOptimizedNetworkTests.cpp (restored from Doxygen source listing)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonWorkloadFactoryHelper.hpp"

#include <Graph.hpp>
#include <Network.hpp>

#include <neon/NeonWorkloadFactory.hpp>

#include <doctest/doctest.h>

15 TEST_SUITE("NeonOptimizedNetwork")
16 {
17 TEST_CASE("OptimizeValidateCpuAccDeviceSupportLayerNoFallback")
18 {
19  // build up the structure of the network
21 
22  armnn::IConnectableLayer* input = net->AddInputLayer(0);
23  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
24 
25  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
27 
30 
31  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
32  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
33  CHECK(optNet);
34  // validate workloads
36  NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
37 
38  armnn::Graph& graph = GetGraphForTesting(optNet.get());
39  for (auto&& layer : graph)
40  {
41  CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
42  CHECK_NOTHROW(
43  layer->CreateWorkload(fact));
44  }
45 }
46 
47 TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
48 {
49  // build up the structure of the network
51 
52  armnn::IConnectableLayer* input = net->AddInputLayer(0);
53 
54  // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
56  armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
57 
58  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
59 
60  input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
61  normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
62 
65 
68 
69  std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
70  std::vector<std::string> errMessages;
71 
72  try
73  {
74  Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
75  FAIL("Should have thrown an exception.");
76  }
77  catch (const armnn::InvalidArgumentException& e)
78  {
79  // Different exceptions are thrown on different backends
80  }
81  CHECK(errMessages.size() > 0);
82 }
83 
84 TEST_CASE("FastMathEnabledTestOnCpuAcc")
85 {
87 
88  armnn::IConnectableLayer* input = net->AddInputLayer(0);
89  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
90 
91  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
93 
96 
97  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
98  armnn::OptimizerOptions optimizerOptions;
99  armnn::BackendOptions modelOptions("CpuAcc", {{"FastMathEnabled", true}});
100  optimizerOptions.m_ModelOptions.push_back(modelOptions);
101 
103  *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
104 
105  CHECK(optimizedNet);
106 
107  auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
108 
109  CHECK(modelOptionsOut.size() == 1);
110  CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
111  CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
112 }
113 
114 TEST_CASE("NumberOfThreadsTestOnCpuAcc")
115 {
117 
118  armnn::IConnectableLayer* input = net->AddInputLayer(0);
119  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
120 
121  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
123 
126 
127  unsigned int numberOfThreads = 2;
128 
129  std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
130  armnn::OptimizerOptions optimizerOptions;
131  armnn::BackendOptions modelOptions("CpuAcc", {{"NumberOfThreads", numberOfThreads}});
132  optimizerOptions.m_ModelOptions.push_back(modelOptions);
133 
135  *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
136 
137  CHECK(optimizedNet);
138  std::unique_ptr<armnn::Graph> graphPtr;
139  armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
140 
141  auto modelOptionsOut = impl.GetModelOptions();
142 
143  CHECK(modelOptionsOut.size() == 1);
144  CHECK(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
145  CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
146 }
147 
148 }
ModelOptions m_ModelOptions
Definition: INetwork.hpp:167
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:39
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:61
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:30
TEST_SUITE("NeonOptimizedNetwork")
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1613
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:173
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:25
Struct for the users to pass backend specific options.
CPU Execution: NEON: ArmCompute.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
ModelOptions & GetModelOptionsForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:30
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:172
virtual int Connect(IInputSlot &destination)=0
A NormalizationDescriptor for the NormalizationLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:530