ArmNN 21.02 - ClOptimizedNetworkTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClWorkloadFactoryHelper.hpp"

#include <Network.hpp>

#include <test/GraphUtils.hpp>

#include <cl/ClWorkloadFactory.hpp>
#include <cl/ClBackendContext.hpp>

#include <Filesystem.hpp>


#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(ClOptimizedNetwork)

BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
{
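    // Verifies that a trivial input->output network is assigned entirely to GpuAcc
    // and that a CL workload can be created for every layer in the optimized graph.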
    // build up the structure of the network
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
    BOOST_CHECK(optNet);
    // validate workloads
    armnn::ClWorkloadFactory fact =
        ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());

    const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
    for (auto&& layer : theGraph)
    {
        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
        BOOST_CHECK_NO_THROW(
            layer->CreateWorkload(fact));
    }
}

BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
{
    // Tests that when Fp16 turbo mode is set the optimizer converts the Fp32 network to Fp16:
    // - a Fp32ToFp16 conversion layer is added after the InputLayer
    // - a Fp16ToFp32 conversion layer is added before the OutputLayer
    // - the remaining layers are checked for Fp16 support; layers that are not supported
    //   keep using Fp32, with conversion layers inserted before and after them
    // - pairs of inverse conversion layers are removed by the optimization
    // At the moment FloorLayer is not supported in Fp16, so it rolls back to Fp32
    // and its inverse conversion layers are removed by the optimizer.
    armnn::INetworkPtr net(armnn::INetwork::Create());

    // Defines layers.
    auto input = net->AddInputLayer(0, "input layer");
    // ReLu1
    armnn::ActivationDescriptor activation1Descriptor;
    activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
    activation1Descriptor.m_A = 1.f;
    activation1Descriptor.m_B = -1.f;
    auto activation = net->AddActivationLayer(activation1Descriptor, "activation layer");
    auto output = net->AddOutputLayer(0, "output layer");

    // Connects layers.
    input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
    activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    armnn::TensorShape shape({4});
    armnn::TensorInfo info(shape, armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);
    activation->GetOutputSlot(0).SetTensorInfo(info);

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};

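    // Enable the Fp32 -> Fp16 reduction described above.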
    armnn::OptimizerOptions optimizerOptions;
    optimizerOptions.m_ReduceFp32ToFp16 = true;

    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);

    const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());

    // Tests that all layers are present in the graph.
    BOOST_TEST(graph.GetNumLayers() == 5);

    // Tests that the vertices exist and have correct names.
    BOOST_TEST(GraphHasNamedLayer(graph, "input layer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "activation layer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
    BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
}

BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
{
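    // Checks that a "FastMathEnabled" backend option passed to the optimizer via
    // OptimizerOptions::m_ModelOptions is preserved in the resulting optimized network.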
    armnn::INetworkPtr net(armnn::INetwork::Create());

    armnn::IConnectableLayer* input  = net->AddInputLayer(0);
    armnn::IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));

    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    armnn::OptimizerOptions optimizerOptions;
    armnn::BackendOptions modelOptions("GpuAcc", {{"FastMathEnabled", true}});
    optimizerOptions.m_ModelOptions.push_back(modelOptions);

    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
        *net, backends, runtime->GetDeviceSpec(), optimizerOptions);

    BOOST_CHECK(optimizedNet);

    auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());

    BOOST_TEST(modelOptionsOut.size() == 1);
    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
}

BOOST_AUTO_TEST_CASE(CheckMLGOTuningFile)
{
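    // Exercises the MLGO tuning support in ClBackendContext: reload_from_file() should
    // succeed for a well-formed tuning file and fail for malformed contents or a missing path.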
    class ClBackendContextTestClass : public armnn::ClBackendContext
    {
    public:
        ClBackendContextTestClass(const armnn::IRuntime::CreationOptions &options) : ClBackendContext(options)
        {}

        bool call_reload_from_file()
        {
            return m_MLGOTuner.reload_from_file(m_MLGOTuningFile);
        }
    };

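    // Minimal well-formed MLGO tuning data: a header, a heuristics table and the
    // heuristic trees it refers to.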
    const std::string validText{
        "<header>\n"
        "gemm-version, [1,2,1]\n"
        "ip-type,gpu\n"
        "</header>\n"
        "<heuristics-table>\n"
        "0, g71 , 8, f32, best-performance, static, gemm-type, [m,n,k,n]\n"
        "1, g71 , 8, f32, best-performance, static, gemm-config-reshaped-only-rhs, [m,n,k,n]\n"
        "2, g71 , 8, f32, best-performance, static, gemm-config-reshaped, [m,n,k,n]\n"
        "3, g71 , 8, qasymm8, best-performance, static, gemm-type, [m,n,k,n]\n"
        "4, g71 , 8, qasymm8, best-performance, static, gemm-config-reshaped-only-rhs, [m,n,k,n]\n"
        "5, g71 , 8, qasymm8, best-performance, static, gemm-config-native, [m,n,k,n]\n"
        "</heuristics-table>\n"
        "<heuristic, 0>\n"
        "b , 0, var, r_mn, >=, num, 2., 1, 2\n"
        "l , 1, gemm-type, reshaped\n"
        "l , 2, gemm-type, reshaped-only-rhs\n"
        "</heuristic>\n"
        "<heuristic, 1>\n"
        "l ,0,gemm-config-reshaped-only-rhs, [2, 4,4,4,1,1,0]\n"
        "</heuristic>\n"
        "<heuristic, 2>\n"
        "l ,0,gemm-config-reshaped,[4,2,8,16,16,1,0,1,0]\n"
        "</heuristic>\n"
        "<heuristic, 3>\n"
        "l , 0, gemm-type, native\n"
        "</heuristic>\n"
        "<heuristic, 4>\n"
        "l ,0,gemm-config-reshaped-only-rhs, [2, 4,4,4,1,1,0]\n"
        "</heuristic>\n"
        "<heuristic, 5>\n"
        "l ,0,gemm-config-native,[4,2,8]\n"
        "</heuristic>\n"};

    const std::string invalidText{"ʕノ•ᴥ•ʔノ ︵ ┻━┻"};

    fs::path validFile = armnnUtils::Filesystem::NamedTempFile("validFile.mlgo");
    fs::path invalidFile = armnnUtils::Filesystem::NamedTempFile("invalidFile.mlgo");

    try
    {
        std::ofstream ofs1{validFile};
        ofs1 << validText << std::endl;
        ofs1.close();

        std::ofstream ofs2{invalidFile};
        ofs2 << invalidText << std::endl;
        ofs2.close();
    }
    catch (std::exception &e)
    {
        std::cerr << "Unable to write to file at location [" << validFile.c_str() << "] : " << e.what() << std::endl;
        BOOST_TEST(false);
    }

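    // The well-formed tuning file should load successfully.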
    armnn::IRuntime::CreationOptions creationOptions1;
    armnn::BackendOptions validOptions
    {
        "GpuAcc",
        {
            {"MLGOTuningFilePath", validFile.c_str()}
        }
    };

    creationOptions1.m_BackendOptions.emplace_back(validOptions);
    ClBackendContextTestClass clBackendContext1(creationOptions1);
    BOOST_TEST(clBackendContext1.call_reload_from_file());

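    // Malformed tuning data should cause reload_from_file() to fail.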
    armnn::BackendOptions invalidOptions
    {
        "GpuAcc",
        {
            {"MLGOTuningFilePath", invalidFile.c_str()}
        }
    };

    armnn::IRuntime::CreationOptions creationOptions2;
    creationOptions2.m_BackendOptions.emplace_back(invalidOptions);
    ClBackendContextTestClass clBackendContext2(creationOptions2);
    BOOST_TEST(clBackendContext2.call_reload_from_file() == false);

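    // A path that does not point to an existing file should also fail.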
    armnn::BackendOptions invalidPathOptions
    {
        "GpuAcc",
        {
            {"MLGOTuningFilePath", "not_a_real_file_path"}
        }
    };

    armnn::IRuntime::CreationOptions creationOptions3;
    creationOptions3.m_BackendOptions.emplace_back(invalidPathOptions);
    ClBackendContextTestClass clBackendContext3(creationOptions3);
    BOOST_TEST(clBackendContext3.call_reload_from_file() == false);
}

BOOST_AUTO_TEST_SUITE_END()