ArmNN 20.02 - JsonPrinterTestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "JsonPrinterTestImpl.hpp"

#include <Profiling.hpp>

#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>

#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/test/unit_test.hpp>

#include <algorithm>
#include <cctype>
#include <sstream>
#include <stack>
#include <string>
#include <vector>
inline bool AreMatchingPair(const char opening, const char closing)
{
    return (opening == '{' && closing == '}') || (opening == '[' && closing == ']');
}

bool AreParenthesesMatching(const std::string& exp)
{
    std::stack<char> expStack;
    for (size_t i = 0; i < exp.length(); ++i)
    {
        if (exp[i] == '{' || exp[i] == '[')
        {
            expStack.push(exp[i]);
        }
        else if (exp[i] == '}' || exp[i] == ']')
        {
            if (expStack.empty() || !AreMatchingPair(expStack.top(), exp[i]))
            {
                return false;
            }
            else
            {
                expStack.pop();
            }
        }
    }
    return expStack.empty();
}
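
// Illustrative examples (not part of the original test): with the matching
// rules above, AreParenthesesMatching("{\"a\": [1, 2]}") returns true, while
// AreParenthesesMatching("{[}]") returns false because the inner '[' is
// closed by '}' rather than ']'.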

std::vector<double> ExtractMeasurements(const std::string& exp)
{
    std::vector<double> numbers;
    bool inArray = false;
    std::string numberString;
    for (size_t i = 0; i < exp.size(); ++i)
    {
        if (exp[i] == '[')
        {
            inArray = true;
        }
        else if (exp[i] == ']' && inArray)
        {
            try
            {
                boost::trim_if(numberString, boost::is_any_of("\t,\n"));
                numbers.push_back(std::stod(numberString));
            }
            catch (const std::invalid_argument&)
            {
                BOOST_FAIL("Could not convert measurements to double: " + numberString);
            }

            numberString.clear();
            inArray = false;
        }
        else if (exp[i] == ',' && inArray)
        {
            try
            {
                boost::trim_if(numberString, boost::is_any_of("\t,\n"));
                numbers.push_back(std::stod(numberString));
            }
            catch (const std::invalid_argument&)
            {
                BOOST_FAIL("Could not convert measurements to double: " + numberString);
            }
            numberString.clear();
        }
        else if (exp[i] != '[' && inArray && exp[i] != ',' && exp[i] != ' ')
        {
            numberString += exp[i];
        }
    }
    return numbers;
}
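
// Illustrative example (not part of the original test): given the fragment
//     "raw": [ 1.2, 3.4, 5.6 ]
// ExtractMeasurements returns { 1.2, 3.4, 5.6 }. Characters outside the
// square brackets are ignored, and tabs/commas/newlines around each number
// are trimmed before std::stod converts it.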

std::vector<std::string> ExtractSections(const std::string& exp)
{
    std::vector<std::string> sections;

    std::stack<size_t> s;
    for (size_t i = 0; i < exp.size(); i++)
    {
        if (exp.at(i) == '{')
        {
            s.push(i);
        }
        else if (exp.at(i) == '}')
        {
            size_t from = s.top();
            s.pop();
            sections.push_back(exp.substr(from, i - from + 1));
        }
    }

    return sections;
}
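
// Note: ExtractSections assumes the braces in 'exp' are balanced; callers
// guard with AreParenthesesMatching first. Because each section is emitted
// when its closing '}' is reached, nested sections appear innermost-first
// in the returned vector.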

std::string GetSoftmaxProfilerJson(const std::vector<armnn::BackendId>& backends)
{
    using namespace armnn;

    BOOST_CHECK(!backends.empty());

    ProfilerManager& profilerManager = ProfilerManager::GetInstance();

    // Create runtime in which test will run
    IRuntime::CreationOptions options;
    options.m_EnableGpuProfiling = backends.front() == armnn::Compute::GpuAcc;
    IRuntimePtr runtime(IRuntime::Create(options));

    // build up the structure of the network
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");
    SoftmaxDescriptor softmaxDescriptor;
    // Set Axis to 1 if CL or Neon until further Axes are supported.
    if (backends.front() == armnn::Compute::CpuAcc || backends.front() == armnn::Compute::GpuAcc)
    {
        softmaxDescriptor.m_Axis = 1;
    }
    IConnectableLayer* softmax = net->AddSoftmaxLayer(softmaxDescriptor, "softmax");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // set the tensors in the network
    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
    inputTensorInfo.SetQuantizationOffset(100);
    inputTensorInfo.SetQuantizationScale(10000.0f);
    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QAsymmU8);
    outputTensorInfo.SetQuantizationOffset(0);
    outputTensorInfo.SetQuantizationScale(1.0f / 256.0f);
    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
    if (!optNet)
    {
        BOOST_FAIL("Error occurred during Optimization, Optimize() returned nullptr.");
    }
    // load it into the runtime
    NetworkId netId;
    auto error = runtime->LoadNetwork(netId, std::move(optNet));
    BOOST_TEST(error == Status::Success);

    // create structures for input & output
    std::vector<uint8_t> inputData
    {
        1, 10, 3, 200, 5
        // one of the inputs is sufficiently larger than the others to saturate softmax
    };
    std::vector<uint8_t> outputData(5);

    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    runtime->GetProfiler(netId)->EnableProfiling(true);

    // do the inferences
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // retrieve the Profiler.Print() output
    std::stringstream ss;
    profilerManager.GetProfiler()->Print(ss);

    return ss.str();
}
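
// For reference, a minimal sketch of the JSON shape this helper is expected
// to return, inferred from the checks in ValidateProfilerJson below (the
// values and the "us" unit are illustrative, not taken from a real run):
//
// {
//     "ArmNN": {
//         "inference_measurements": {
//             "raw": [ 1.0, 2.0, 3.0 ],
//             "unit": "us"
//         }
//     }
// }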

inline void ValidateProfilerJson(std::string& result)
{
    // ensure measurements were extracted from the output
    std::vector<double> measurementsVector = ExtractMeasurements(result);
    BOOST_CHECK(!measurementsVector.empty());

    // check sections contain raw and unit tags
    // first ensure the parentheses are balanced
    if (AreParenthesesMatching(result))
    {
        // remove parent sections that will not have raw or unit tags
        std::vector<std::string> sectionVector = ExtractSections(result);
        sectionVector.erase(
            std::remove_if(sectionVector.begin(), sectionVector.end(),
                           [](const std::string& section)
                           {
                               return boost::contains(section, "\"ArmNN\":")
                                   || boost::contains(section, "\"inference_measurements\":");
                           }),
            sectionVector.end());
        BOOST_CHECK(!sectionVector.empty());

        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
                                [](const std::string& i) { return boost::contains(i, "\"raw\":"); }));

        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
                                [](const std::string& i) { return boost::contains(i, "\"unit\":"); }));
    }

    // remove the time measurements as they vary from test to test
    result.erase(std::remove_if(result.begin(), result.end(),
                                [](char c) { return c == '.'; }), result.end());
    result.erase(std::remove_if(result.begin(), result.end(),
                                [](char c) { return isdigit(static_cast<unsigned char>(c)) != 0; }),
                 result.end());
    result.erase(std::remove_if(result.begin(), result.end(),
                                [](char c) { return c == '\t'; }), result.end());

    BOOST_CHECK(boost::contains(result, "ArmNN"));
    BOOST_CHECK(boost::contains(result, "inference_measurements"));

    // ensure no spare parentheses are present in the print output
    BOOST_CHECK(AreParenthesesMatching(result));
}

void RunSoftmaxProfilerJsonPrinterTest(const std::vector<armnn::BackendId>& backends)
{
    // set up the test fixture and obtain the JSON printer result
    std::string result = GetSoftmaxProfilerJson(backends);

    // validate the JSON printer result
    ValidateProfilerJson(result);

    const armnn::BackendId& firstBackend = backends.at(0);
    if (firstBackend == armnn::Compute::GpuAcc)
    {
        BOOST_CHECK(boost::contains(result,
            "OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]"));
    }
    else if (firstBackend == armnn::Compute::CpuAcc)
    {
        BOOST_CHECK(boost::contains(result,
            "NeonKernelTimer/: NEFillBorderKernel"));
    }
}
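
// A minimal sketch of how a backend-specific test file might invoke this
// helper (the test case name here is hypothetical, not from this file):
//
// BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuRefTest)
// {
//     RunSoftmaxProfilerJsonPrinterTest({armnn::Compute::CpuRef});
// }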