ArmNN 21.02
RefOptimizedNetworkTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <Graph.hpp>
#include <Network.hpp>

#include <reference/RefWorkloadFactory.hpp>

#include <boost/test/unit_test.hpp>
#include <test/GraphUtils.hpp>

BOOST_AUTO_TEST_SUITE(RefOptimizedNetwork)

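// Builds a small diamond-shaped network (see the sketch below), optimizes it for the CpuRef
// backend and checks that a reference workload can be created for every layer in the result.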
BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
{
    const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);

    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::NormalizationDescriptor nmDesc;
    armnn::ActivationDescriptor acDesc;

    //   in
    //    |
    //   nm
    //   / |
    //  ac |
    //   \ |
    //    ml
    //    |
    //    sm
    //    |
    //    ot
    armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");

    layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).SetTensorInfo(desc);

    layer = net->AddActivationLayer(acDesc, "ac");

    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    armnn::IConnectableLayer* prevLayer = layer;
    layer = net->AddMultiplicationLayer("ml");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    prevLayer = layer;
    armnn::SoftmaxDescriptor softmaxDescriptor;
    layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(desc);

    prevLayer = layer;
    layer = net->AddOutputLayer(0, "ot");

    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();
    BOOST_CHECK(optNet);

    // Validates workloads.
    armnn::RefWorkloadFactory fact;
    for (auto&& layer : graph)
    {
        BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
    }
}

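// Optimizes a simple input -> permute -> output network for CpuRef and checks that every
// layer in the optimized graph has been assigned to the CpuRef backend.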
BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
{
    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};

    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    armnn::PermuteDescriptor descriptor({0, 2, 3, 1});
    armnn::IConnectableLayer* permute = net->AddPermuteLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
    permute->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
    permute->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 4, 1, 4 }, armnn::DataType::Float32));

    // optimize the network
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());

    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();

    for (auto&& layer : graph)
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
    }
}

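// Same backend-assignment check for a Mean layer: input -> mean (reduce over axes {0, 1}) -> output.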
BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
{
    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};

    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input = net->AddInputLayer(0);

    armnn::MeanDescriptor descriptor({ 0, 1 }, false);
    armnn::IConnectableLayer* meanLayer = net->AddMeanLayer(descriptor);

    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(meanLayer->GetInputSlot(0));
    meanLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 4, 3, 2 }, armnn::DataType::Float32));
    meanLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 2 }, armnn::DataType::Float32));

    // optimize the network
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    armnn::Graph& graph = GetGraphForTesting(optNet.get());
    graph.AllocateDynamicBuffers();
    for (auto&& layer : graph)
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
    }
}

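// Checks that optimizing with OptimizerOptions::m_Debug set inserts a DebugLayer after the
// input and activation layers, so the optimized graph holds five layers with the expected names.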
BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef)
{
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::ActivationDescriptor activation1Descriptor;
    activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activation1Descriptor.m_A = 1.f;
    activation1Descriptor.m_B = -1.f;

    // Defines layers.
    auto input = net->AddInputLayer(0, "InputLayer");
    auto activation = net->AddActivationLayer(activation1Descriptor, "ActivationLayer");
    auto output = net->AddOutputLayer(0, "OutputLayer");

    // Connects layers.
    input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    armnn::TensorShape shape({4});
    armnn::TensorInfo info(shape, armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    activation->GetOutputSlot(0).SetTensorInfo(info);

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};

    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_Debug = true;

    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                               optimizerOptions);

    armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
    graph.AllocateDynamicBuffers();

    // Tests that all layers are present in the graph.
    BOOST_TEST(graph.GetNumLayers() == 5);

    // Tests that the vertices exist and have correct names.
    BOOST_TEST(GraphHasNamedLayer(graph, "InputLayer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "ActivationLayer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "OutputLayer"));
}

BOOST_AUTO_TEST_SUITE_END()