ArmNN
 22.05.01
ClOptimizedNetworkTests.cpp File Reference
#include "ClWorkloadFactoryHelper.hpp"
#include <Network.hpp>
#include <GraphUtils.hpp>
#include <cl/ClWorkloadFactory.hpp>
#include <cl/ClBackendContext.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <doctest/doctest.h>

Go to the source code of this file.

Functions

 TEST_SUITE ("ClOptimizedNetwork")
 

Function Documentation

◆ TEST_SUITE()

TEST_SUITE ( "ClOptimizedNetwork"  )

Definition at line 19 of file ClOptimizedNetworkTests.cpp.

References armnn::BoundedReLu, ClBackendContext::ClBackendContext(), IOutputSlot::Connect(), IRuntime::Create(), INetwork::Create(), armnn::Float32, armnn::GetGraphForTesting(), IConnectableLayer::GetInputSlot(), armnn::GetModelOptionsForTesting(), IConnectableLayer::GetOutputSlot(), armnn::GpuAcc, GraphHasNamedLayer(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, IRuntime::CreationOptions::m_BackendOptions, ActivationDescriptor::m_Function, OptimizerOptions::m_ModelOptions, OptimizerOptions::m_ReduceFp32ToFp16, armnnUtils::Filesystem::NamedTempFile(), armnn::Optimize(), and IOutputSlot::SetTensorInfo().

20 {
21 TEST_CASE("OptimizeValidateGpuDeviceSupportLayerNoFallback")
22 {
23  // build up the structure of the network
25 
26  armnn::IConnectableLayer* input = net->AddInputLayer(0);
27  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
28 
29  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
31 
34 
35  std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
36  armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
37  CHECK(optNet);
38  // validate workloads
40  ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
41 
42  const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
43  for (auto&& layer : theGraph)
44  {
45  CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
46  CHECK_NOTHROW(
47  layer->CreateWorkload(fact));
48  }
49 }
50 
51 TEST_CASE("FP16TurboModeTestOnGpuAcc")
52 {
53  // Test to check when Fp16 Turbo mode set
54  // it converts the Fp32 network to Fp16 Network
55  // add Fp32ToFp16 conversion layer after the InputLayer
56  // add Fp16ToFp32 conversion layer after the OutputLayer
57  // checks the other layers if they are supported in Fp16
58  // if they are not put the conversion layers before and after
59  // if they are not supported in Fp16 use Fp32 instead
60  // if there are inverse conversion layers remove them with optimization
61  // at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
62  // and inverse conversion layers are removed by the optimizer
64 
65  // Defines layers.
66  auto input = net->AddInputLayer(0, "input layer");
67  // ReLu1
68  armnn::ActivationDescriptor activation1Descriptor;
69  activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
70  activation1Descriptor.m_A = 1.f;
71  activation1Descriptor.m_B = -1.f;
72  auto activation = net->AddActivationLayer(activation1Descriptor, "activation layer");
73  auto output = net->AddOutputLayer(0, "output layer");
74 
75  // Connects layers.
76  input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
77  activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
78 
79  armnn::TensorShape shape({4});
81  input->GetOutputSlot(0).SetTensorInfo(info);
82  activation->GetOutputSlot(0).SetTensorInfo(info);
83 
86 
87  std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
88 
89  armnn::OptimizerOptions optimizerOptions;
90  optimizerOptions.m_ReduceFp32ToFp16 = true;
91 
93  *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
94 
95  const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
96 
97  // Tests that all layers are present in the graph.
98  CHECK(graph.GetNumLayers() == 5);
99 
100  // Tests that the vertices exist and have correct names.
101  CHECK(GraphHasNamedLayer(graph, "input layer"));
102  CHECK(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
103  CHECK(GraphHasNamedLayer(graph, "activation layer"));
104  CHECK(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
105  CHECK(GraphHasNamedLayer(graph, "output layer"));
106 }
107 
108 TEST_CASE("FastMathEnabledTestOnGpuAcc")
109 {
111 
112  armnn::IConnectableLayer* input = net->AddInputLayer(0);
113  armnn::IConnectableLayer* output = net->AddOutputLayer(0);
114 
115  input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
117 
120 
121  std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
122  armnn::OptimizerOptions optimizerOptions;
123  armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
124  optimizerOptions.m_ModelOptions.push_back(modelOptions);
125 
127  *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
128 
129  CHECK(optimizedNet);
130 
131  auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
132 
133  CHECK(modelOptionsOut.size() == 1);
134  CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
135  CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
136 }
137 
138 TEST_CASE("CheckMLGOTuningFile")
139 {
140  class ClBackendContextTestClass : public armnn::ClBackendContext
141  {
142  public:
143  ClBackendContextTestClass(const armnn::IRuntime::CreationOptions &options) : ClBackendContext(options)
144  {}
145 
146  bool call_reload_from_file()
147  {
148  return m_MLGOTuner.reload_from_file(m_MLGOTuningFile);
149  }
150  };
151 
152  const std::string validText{
153  "<header>\n"
154  "gemm-version, [1,2,1]\n"
155  "ip-type,gpu\n"
156  "</header>\n"
157  "<heuristics-table>\n"
158  "0, g71 , 8, f32, best-performance, static, gemm-type, [m,n,k,n]\n"
159  "1, g71 , 8, f32, best-performance, static, gemm-config-reshaped-only-rhs, [m,n,k,n]\n"
160  "2, g71 , 8, f32, best-performance, static, gemm-config-reshaped, [m,n,k,n]\n"
161  "3, g71 , 8, qasymm8, best-performance, static, gemm-type, [m,n,k,n]\n"
162  "4, g71 , 8, qasymm8, best-performance, static, gemm-config-reshaped-only-rhs, [m,n,k,n]\n"
163  "5, g71 , 8, qasymm8, best-performance, static, gemm-config-native, [m,n,k,n]\n"
164  "</heuristics-table>\n"
165  "<heuristic, 0>\n"
166  "b , 0, var, r_mn, >=, num, 2., 1, 2\n"
167  "l , 1, gemm-type, reshaped\n"
168  "l , 2, gemm-type, reshaped-only-rhs\n"
169  "</heuristic>\n"
170  "<heuristic, 1>\n"
171  "l ,0,gemm-config-reshaped-only-rhs, [2, 4,4,4,1,1,0]\n"
172  "</heuristic>\n"
173  "<heuristic, 2>\n"
174  "l ,0,gemm-config-reshaped,[4,2,8,16,16,1,0,1,0]\n"
175  "</heuristic>\n"
176  "<heuristic, 3>\n"
177  "l , 0, gemm-type, native\n"
178  "</heuristic>\n"
179  "<heuristic, 4>\n"
180  "l ,0,gemm-config-reshaped-only-rhs, [2, 4,4,4,1,1,0]\n"
181  "</heuristic>\n"
182  "<heuristic, 5>\n"
183  "l ,0,gemm-config-native,[4,2,8]\n"
184  "</heuristic>\n"};
185 
186  const std::string invalidText{"ʕノ•ᴥ•ʔノ ︵ ┻━┻"};
187 
188  fs::path validFile = armnnUtils::Filesystem::NamedTempFile("validFile.mlgo");
189  fs::path invalidFile = armnnUtils::Filesystem::NamedTempFile("invalidFile.mlgo");
190 
191  try
192  {
193  std::ofstream ofs1{validFile};
194  ofs1 << validText << std::endl;
195  ofs1.close();
196 
197  std::ofstream ofs2{invalidFile};
198  ofs2 << invalidText << std::endl;
199  ofs2.close();
200  }
201  catch (std::exception &e)
202  {
203  std::cerr << "Unable to write to file at location [" << validFile.c_str() << "] : " << e.what() << std::endl;
204  CHECK(false);
205  }
206 
207  armnn::IRuntime::CreationOptions creationOptions1;
208  armnn::BackendOptions validOptions
209  {
210  "GpuAcc",
211  {
212  {"MLGOTuningFilePath", validFile.c_str()}
213  }
214  };
215 
216  creationOptions1.m_BackendOptions.emplace_back(validOptions);
217  ClBackendContextTestClass clBackendContext1(creationOptions1);
218  CHECK(clBackendContext1.call_reload_from_file());
219 
220  armnn::BackendOptions invalidOptions
221  {
222  "GpuAcc",
223  {
224  {"MLGOTuningFilePath", invalidFile.c_str()}
225  }
226  };
227 
228  armnn::IRuntime::CreationOptions creationOptions2;
229  creationOptions2.m_BackendOptions.emplace_back(invalidOptions);
230  ClBackendContextTestClass clBackendContext2(creationOptions2);
231  CHECK(clBackendContext2.call_reload_from_file() == false);
232 
233  armnn::BackendOptions invalidPathOptions
234  {
235  "GpuAcc",
236  {
237  {"MLGOTuningFilePath", "not_a_real_file_path"}
238  }
239  };
240 
241  armnn::IRuntime::CreationOptions creationOptions3;
242  creationOptions3.m_BackendOptions.emplace_back(invalidPathOptions);
243  ClBackendContextTestClass clBackendContext3(creationOptions3);
244  CHECK(clBackendContext3.call_reload_from_file() == false);
245 }
246 
247 }
ModelOptions m_ModelOptions
Definition: INetwork.hpp:233
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:49
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:66
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:33
bool GraphHasNamedLayer(const armnn::Graph &graph, const std::string &name)
Definition: GraphUtils.cpp:10
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
bool m_ReduceFp32ToFp16
Reduces all Fp32 operators in the model to Fp16 for faster processing.
Definition: INetwork.hpp:214
std::vector< BackendOptions > m_BackendOptions
Pass backend specific options.
Definition: IRuntime.hpp:189
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1847
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:242
GPU Execution: OpenCL: ArmCompute.
ArmNN performs an optimization on each model/network before it gets loaded for execution.
Definition: INetwork.hpp:137
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
min(a, max(b, input)) ReLu1 & ReLu6.
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:49
Struct for the users to pass backend specific options.
float m_A
Alpha upper bound value used by the activation functions (BoundedReLu, Linear, TanH, Elu).
Definition: Descriptors.hpp:61
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
ModelOptions & GetModelOptionsForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:54
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
fs::path NamedTempFile(const char *fileName)
Returns a path to a file in the system temporary folder. If the file already exists, it is deleted so the caller starts with a fresh file.
Definition: Filesystem.cpp:24
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:241
virtual int Connect(IInputSlot &destination)=0
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:476
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu, LeakyReLu, Abs, Sqrt, Square, Elu).
Definition: Descriptors.hpp:59