ArmNN
 22.05.01
NetworkExecutionUtils.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
9 #include <InferenceTest.hpp>
10 #include <ResolveType.hpp>
11 
12 #if defined(ARMNN_SERIALIZER)
14 #endif
15 #if defined(ARMNN_TF_LITE_PARSER)
17 #endif
18 #if defined(ARMNN_ONNX_PARSER)
20 #endif
21 
22 template<armnn::DataType NonQuantizedType>
23 auto ParseDataArray(std::istream& stream);
24 
25 template<armnn::DataType QuantizedType>
26 auto ParseDataArray(std::istream& stream,
27  const float& quantizationScale,
28  const int32_t& quantizationOffset);
29 
30 template<>
31 auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
32 {
33  return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
34 }
35 
36 template<>
37 auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
38 {
39  return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
40 }
41 
42 template<>
43 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
44 {
45  return ParseArrayImpl<int8_t>(stream,
46  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
47 }
48 
49 template<>
50 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
51 {
52  return ParseArrayImpl<uint8_t>(stream,
53  [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
54 }
55 
56 
57 template<>
58 auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
59 {
60  return ParseArrayImpl<int8_t>(stream,
61  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
62 }
63 
64 template<>
65 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
66  const float& quantizationScale,
67  const int32_t& quantizationOffset)
68 {
69  return ParseArrayImpl<int8_t>(stream,
70  [&quantizationScale, &quantizationOffset](const std::string& s)
71  {
72  return armnn::numeric_cast<int8_t>(
73  armnn::Quantize<int8_t>(std::stof(s),
74  quantizationScale,
75  quantizationOffset));
76  });
77 }
78 
79 template<>
80 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
81  const float& quantizationScale,
82  const int32_t& quantizationOffset)
83 {
84  return ParseArrayImpl<uint8_t>(stream,
85  [&quantizationScale, &quantizationOffset](const std::string& s)
86  {
87  return armnn::numeric_cast<uint8_t>(
88  armnn::Quantize<uint8_t>(std::stof(s),
89  quantizationScale,
90  quantizationOffset));
91  });
92 }
93 
94 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
95 std::vector<T> GenerateDummyTensorData(unsigned int numElements)
96 {
97  return std::vector<T>(numElements, static_cast<T>(0));
98 }
99 
100 
101 std::vector<unsigned int> ParseArray(std::istream& stream)
102 {
103  return ParseArrayImpl<unsigned int>(
104  stream,
105  [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
106 }
107 
108 std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
109 {
110  std::stringstream stream(inputString);
111  return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
112  return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
113 }
114 
115 
116 TensorPrinter::TensorPrinter(const std::string& binding,
117  const armnn::TensorInfo& info,
118  const std::string& outputTensorFile,
119  bool dequantizeOutput,
120  const bool printToConsole)
121  : m_OutputBinding(binding)
122  , m_Scale(info.GetQuantizationScale())
123  , m_Offset(info.GetQuantizationOffset())
124  , m_OutputTensorFile(outputTensorFile)
125  , m_DequantizeOutput(dequantizeOutput)
126  , m_PrintToConsole(printToConsole) {}
127 
128 void TensorPrinter::operator()(const std::vector<float>& values)
129 {
130  if (m_PrintToConsole)
131  {
132  std::cout << m_OutputBinding << ": ";
133  ForEachValue(values, [](float value)
134  {
135  printf("%f ", value);
136  });
137  printf("\n");
138  }
139  WriteToFile(values);
140 }
141 
142 void TensorPrinter::operator()(const std::vector<uint8_t>& values)
143 {
144  if(m_DequantizeOutput)
145  {
146  auto& scale = m_Scale;
147  auto& offset = m_Offset;
148  std::vector<float> dequantizedValues;
149  ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
150  {
151  auto dequantizedValue = armnn::Dequantize(value, scale, offset);
152  dequantizedValues.push_back(dequantizedValue);
153  });
154 
155  if (m_PrintToConsole)
156  {
157  std::cout << m_OutputBinding << ": ";
158  ForEachValue(dequantizedValues, [](float value)
159  {
160  printf("%f ", value);
161  });
162  printf("\n");
163  }
164 
165  WriteToFile(dequantizedValues);
166  }
167  else
168  {
169  const std::vector<int> intValues(values.begin(), values.end());
170  operator()(intValues);
171  }
172 }
173 
174 void TensorPrinter::operator()(const std::vector<int8_t>& values)
175 {
176  if (m_PrintToConsole)
177  {
178  std::cout << m_OutputBinding << ": ";
179  ForEachValue(values, [](int8_t value)
180  {
181  printf("%d ", value);
182  });
183  printf("\n");
184  }
185  WriteToFile(values);
186 }
187 
188 void TensorPrinter::operator()(const std::vector<int>& values)
189 {
190  if (m_PrintToConsole)
191  {
192  std::cout << m_OutputBinding << ": ";
193  ForEachValue(values, [](int value)
194  {
195  printf("%d ", value);
196  });
197  printf("\n");
198  }
199  WriteToFile(values);
200 }
201 
202 template<typename Container, typename Delegate>
203 void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
204 {
205  for (const auto& value : c)
206  {
207  delegate(value);
208  }
209 }
210 
211 template<typename T>
212 void TensorPrinter::WriteToFile(const std::vector<T>& values)
213 {
214  if (!m_OutputTensorFile.empty())
215  {
216  std::ofstream outputTensorFile;
217  outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
218  if (outputTensorFile.is_open())
219  {
220  outputTensorFile << m_OutputBinding << ": ";
221  std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
222  }
223  else
224  {
225  ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
226  }
227  outputTensorFile.close();
228  }
229 }
230 
232  unsigned int numElements,
233  const std::string& dataTypeStr,
235  const armnn::Optional<std::string>& dataFile)
236 {
237  const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
238  const bool quantizeData = qParams.has_value();
239 
240  std::ifstream inputTensorFile;
241  if (readFromFile)
242  {
243  inputTensorFile = std::ifstream(dataFile.value());
244  }
245 
246  if (dataTypeStr.compare("float") == 0)
247  {
248  if (quantizeData)
249  {
250  const float qScale = qParams.value().first;
251  const int qOffset = qParams.value().second;
252 
253  tensorData = readFromFile ?
254  ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
255  GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
256  }
257  else
258  {
259  tensorData = readFromFile ?
260  ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
261  GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
262  }
263  }
264  else if (dataTypeStr.compare("int") == 0)
265  {
266  tensorData = readFromFile ?
267  ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
268  GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
269  }
270  else if (dataTypeStr.compare("qsymms8") == 0)
271  {
272  tensorData = readFromFile ?
273  ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
274  GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
275  }
276  else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
277  {
278  tensorData = readFromFile ?
279  ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
280  GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
281  }
282  else if (dataTypeStr.compare("qasymms8") == 0)
283  {
284  tensorData = readFromFile ?
285  ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
286  GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
287  }
288  else
289  {
290  std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
291  ARMNN_LOG(fatal) << errorMessage;
292 
293  inputTensorFile.close();
294  throw armnn::Exception(errorMessage);
295  }
296 
297  inputTensorFile.close();
298 }
299 
300 bool ValidatePath(const std::string& file, const bool expectFile)
301 {
302  if (!fs::exists(file))
303  {
304  std::cerr << "Given file path '" << file << "' does not exist" << std::endl;
305  return false;
306  }
307  if (!fs::is_regular_file(file) && expectFile)
308  {
309  std::cerr << "Given file path '" << file << "' is not a regular file" << std::endl;
310  return false;
311  }
312  return true;
313 }
314 
315 bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
316 {
317  bool allPathsValid = true;
318  for (auto const& file : fileVec)
319  {
320  if(!ValidatePath(file, expectFile))
321  {
322  allPathsValid = false;
323  }
324  }
325  return allPathsValid;
326 }
327 
328 
329 
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
Definition: TypesUtils.cpp:46
std::vector< unsigned int > ParseArray(std::istream &stream)
void operator()(const std::vector< float > &values)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
#define ARMNN_LOG(severity)
Definition: Logging.hpp:205
std::vector< T > GenerateDummyTensorData(unsigned int numElements)
std::string StringTrimCopy(const std::string &str, const std::string &chars="\t\n\v\f\r ")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
Definition: StringUtils.hpp:88
void PopulateTensorWithData(armnnUtils::TContainer &tensorData, unsigned int numElements, const std::string &dataTypeStr, const armnn::Optional< QuantizationParams > &qParams, const armnn::Optional< std::string > &dataFile)
auto ParseDataArray(std::istream &stream)
bool has_value() const noexcept
Definition: Optional.hpp:53
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies that every path in a given vector of strings is valid.
TensorPrinter(const std::string &binding, const armnn::TensorInfo &info, const std::string &outputTensorFile, bool dequantizeOutput, bool printToConsole=true)
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
mapbox::util::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char >, std::vector< int8_t > > TContainer
Definition: TContainer.hpp:18
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
bool ValidatePath(const std::string &file, const bool expectFile)
Verifies if the given string is a valid path.