ArmNN
 21.08
NetworkExecutionUtils.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
9 #include <InferenceTest.hpp>
10 #include <ResolveType.hpp>
11 
12 #if defined(ARMNN_SERIALIZER)
14 #endif
15 #if defined(ARMNN_TF_LITE_PARSER)
17 #endif
18 #if defined(ARMNN_ONNX_PARSER)
20 #endif
21 
// Parses whitespace/comma separated tokens from 'stream' into a vector whose
// element type matches the given (non-quantized) ArmNN data type.
template<armnn::DataType NonQuantizedType>
auto ParseDataArray(std::istream& stream);

// Overload for quantized types: each token is parsed as float and then
// quantized with the supplied scale and zero-point offset.
template<armnn::DataType QuantizedType>
auto ParseDataArray(std::istream& stream,
                    const float& quantizationScale,
                    const int32_t& quantizationOffset);
29 
30 template<>
31 auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
32 {
33  return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
34 }
35 
36 template<>
37 auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
38 {
39  return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
40 }
41 
42 template<>
43 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
44 {
45  return ParseArrayImpl<int8_t>(stream,
46  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
47 }
48 
49 template<>
50 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
51 {
52  return ParseArrayImpl<uint8_t>(stream,
53  [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
54 }
55 
56 
57 template<>
58 auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
59 {
60  return ParseArrayImpl<int8_t>(stream,
61  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
62 }
63 
64 template<>
65 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
66  const float& quantizationScale,
67  const int32_t& quantizationOffset)
68 {
69  return ParseArrayImpl<int8_t>(stream,
70  [&quantizationScale, &quantizationOffset](const std::string& s)
71  {
72  return armnn::numeric_cast<int8_t>(
73  armnn::Quantize<int8_t>(std::stof(s),
74  quantizationScale,
75  quantizationOffset));
76  });
77 }
78 
79 template<>
80 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
81  const float& quantizationScale,
82  const int32_t& quantizationOffset)
83 {
84  return ParseArrayImpl<uint8_t>(stream,
85  [&quantizationScale, &quantizationOffset](const std::string& s)
86  {
87  return armnn::numeric_cast<uint8_t>(
88  armnn::Quantize<uint8_t>(std::stof(s),
89  quantizationScale,
90  quantizationOffset));
91  });
92 }
93 
94 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
95 std::vector<T> GenerateDummyTensorData(unsigned int numElements)
96 {
97  return std::vector<T>(numElements, static_cast<T>(0));
98 }
99 
100 
101 std::vector<unsigned int> ParseArray(std::istream& stream)
102 {
103  return ParseArrayImpl<unsigned int>(
104  stream,
105  [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
106 }
107 
108 std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
109 {
110  std::stringstream stream(inputString);
111  return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
112  return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
113 }
114 
115 
// Captures the output binding name, the tensor's quantization parameters
// (scale and zero-point offset taken from 'info') and the optional output
// file path; operator() later uses these to print and optionally dequantize
// and persist the tensor's values.
TensorPrinter::TensorPrinter(const std::string& binding,
                             const armnn::TensorInfo& info,
                             const std::string& outputTensorFile,
                             bool dequantizeOutput)
    : m_OutputBinding(binding)
    , m_Scale(info.GetQuantizationScale())
    , m_Offset(info.GetQuantizationOffset())
    , m_OutputTensorFile(outputTensorFile)
    , m_DequantizeOutput(dequantizeOutput) {}
125 
126 void TensorPrinter::operator()(const std::vector<float>& values)
127 {
128  ForEachValue(values, [](float value)
129  {
130  printf("%f ", value);
131  });
132  WriteToFile(values);
133 }
134 
135 void TensorPrinter::operator()(const std::vector<uint8_t>& values)
136 {
137  if(m_DequantizeOutput)
138  {
139  auto& scale = m_Scale;
140  auto& offset = m_Offset;
141  std::vector<float> dequantizedValues;
142  ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
143  {
144  auto dequantizedValue = armnn::Dequantize(value, scale, offset);
145  printf("%f ", dequantizedValue);
146  dequantizedValues.push_back(dequantizedValue);
147  });
148  WriteToFile(dequantizedValues);
149  }
150  else
151  {
152  const std::vector<int> intValues(values.begin(), values.end());
153  operator()(intValues);
154  }
155 }
156 
157 void TensorPrinter::operator()(const std::vector<int8_t>& values)
158 {
159  ForEachValue(values, [](int8_t value)
160  {
161  printf("%d ", value);
162  });
163  WriteToFile(values);
164 }
165 
166 void TensorPrinter::operator()(const std::vector<int>& values)
167 {
168  ForEachValue(values, [](int value)
169  {
170  printf("%d ", value);
171  });
172  WriteToFile(values);
173 }
174 
175 template<typename Container, typename Delegate>
176 void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
177 {
178  std::cout << m_OutputBinding << ": ";
179  for (const auto& value : c)
180  {
181  delegate(value);
182  }
183  printf("\n");
184 }
185 
186 template<typename T>
187 void TensorPrinter::WriteToFile(const std::vector<T>& values)
188 {
189  if (!m_OutputTensorFile.empty())
190  {
191  std::ofstream outputTensorFile;
192  outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
193  if (outputTensorFile.is_open())
194  {
195  outputTensorFile << m_OutputBinding << ": ";
196  std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
197  }
198  else
199  {
200  ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
201  }
202  outputTensorFile.close();
203  }
204 }
205 
// Variant able to hold one tensor's data in any of the supported element types.
using TContainer =
    mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
// (scale, zero-point offset) pair used when quantizing float input data.
using QuantizationParams = std::pair<float, int32_t>;
209 
211  unsigned int numElements,
212  const std::string& dataTypeStr,
214  const armnn::Optional<std::string>& dataFile)
215 {
216  const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
217  const bool quantizeData = qParams.has_value();
218 
219  std::ifstream inputTensorFile;
220  if (readFromFile)
221  {
222  inputTensorFile = std::ifstream(dataFile.value());
223  }
224 
225  if (dataTypeStr.compare("float") == 0)
226  {
227  if (quantizeData)
228  {
229  const float qScale = qParams.value().first;
230  const int qOffset = qParams.value().second;
231 
232  tensorData = readFromFile ?
233  ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
234  GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
235  }
236  else
237  {
238  tensorData = readFromFile ?
239  ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
240  GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
241  }
242  }
243  else if (dataTypeStr.compare("int") == 0)
244  {
245  tensorData = readFromFile ?
246  ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
247  GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
248  }
249  else if (dataTypeStr.compare("qsymms8") == 0)
250  {
251  tensorData = readFromFile ?
252  ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
253  GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
254  }
255  else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
256  {
257  tensorData = readFromFile ?
258  ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
259  GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
260  }
261  else if (dataTypeStr.compare("qasymms8") == 0)
262  {
263  tensorData = readFromFile ?
264  ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
265  GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
266  }
267  else
268  {
269  std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
270  ARMNN_LOG(fatal) << errorMessage;
271 
272  inputTensorFile.close();
273  throw armnn::Exception(errorMessage);
274  }
275 
276  inputTensorFile.close();
277 }
278 
279 bool ValidatePath(const std::string& file, const bool expectFile)
280 {
281  if (!fs::exists(file))
282  {
283  std::cerr << "Given file path '" << file << "' does not exist" << std::endl;
284  return false;
285  }
286  if (!fs::is_regular_file(file) && expectFile)
287  {
288  std::cerr << "Given file path '" << file << "' is not a regular file" << std::endl;
289  return false;
290  }
291  return true;
292 }
293 
294 bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
295 {
296  bool allPathsValid = true;
297  for (auto const& file : fileVec)
298  {
299  if(!ValidatePath(file, expectFile))
300  {
301  allPathsValid = false;
302  }
303  }
304  return allPathsValid;
305 }
306 
307 
308 
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
Definition: TypesUtils.cpp:46
std::vector< unsigned int > ParseArray(std::istream &stream)
void operator()(const std::vector< float > &values)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of delimiter into a vector of strings.
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
std::vector< T > GenerateDummyTensorData(unsigned int numElements)
void PopulateTensorWithData(TContainer &tensorData, unsigned int numElements, const std::string &dataTypeStr, const armnn::Optional< QuantizationParams > &qParams, const armnn::Optional< std::string > &dataFile)
std::string StringTrimCopy(const std::string &str, const std::string &chars="\t\n\v\f\r ")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
Definition: StringUtils.hpp:85
auto ParseDataArray(std::istream &stream)
bool has_value() const noexcept
Definition: Optional.hpp:53
mapbox::util::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char >, std::vector< int8_t > > TContainer
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies if a given vector of strings are valid paths.
TensorPrinter(const std::string &binding, const armnn::TensorInfo &info, const std::string &outputTensorFile, bool dequantizeOutput)
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
bool ValidatePath(const std::string &file, const bool expectFile)
Verifies if the given string is a valid path.
std::pair< float, int32_t > QuantizationParams