ArmNN 21.05 — NetworkExecutionUtils.cpp (source listing)
1 //
2 // Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
8 #include <Filesystem.hpp>
9 #include <InferenceTest.hpp>
10 #include <ResolveType.hpp>
11 
12 #if defined(ARMNN_SERIALIZER)
14 #endif
15 #if defined(ARMNN_TF_LITE_PARSER)
17 #endif
18 #if defined(ARMNN_ONNX_PARSER)
20 #endif
21 
22 template<armnn::DataType NonQuantizedType>
23 auto ParseDataArray(std::istream& stream);
24 
25 template<armnn::DataType QuantizedType>
26 auto ParseDataArray(std::istream& stream,
27  const float& quantizationScale,
28  const int32_t& quantizationOffset);
29 
30 template<>
31 auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
32 {
33  return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
34 }
35 
36 template<>
37 auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
38 {
39  return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
40 }
41 
42 template<>
43 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
44 {
45  return ParseArrayImpl<uint8_t>(stream,
46  [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
47 }
48 
49 
50 template<>
51 auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
52 {
53  return ParseArrayImpl<int8_t>(stream,
54  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
55 }
56 
57 
58 
59 template<>
60 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
61  const float& quantizationScale,
62  const int32_t& quantizationOffset)
63 {
64  return ParseArrayImpl<uint8_t>(stream,
65  [&quantizationScale, &quantizationOffset](const std::string& s)
66  {
67  return armnn::numeric_cast<uint8_t>(
68  armnn::Quantize<uint8_t>(std::stof(s),
69  quantizationScale,
70  quantizationOffset));
71  });
72 }
73 
74 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
75 std::vector<T> GenerateDummyTensorData(unsigned int numElements)
76 {
77  return std::vector<T>(numElements, static_cast<T>(0));
78 }
79 
80 
81 std::vector<unsigned int> ParseArray(std::istream& stream)
82 {
83  return ParseArrayImpl<unsigned int>(
84  stream,
85  [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
86 }
87 
88 std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
89 {
90  std::stringstream stream(inputString);
91  return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
92  return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
93 }
94 
95 
96 TensorPrinter::TensorPrinter(const std::string& binding,
97  const armnn::TensorInfo& info,
98  const std::string& outputTensorFile,
99  bool dequantizeOutput)
100  : m_OutputBinding(binding)
101  , m_Scale(info.GetQuantizationScale())
102  , m_Offset(info.GetQuantizationOffset())
103  , m_OutputTensorFile(outputTensorFile)
104  , m_DequantizeOutput(dequantizeOutput) {}
105 
106 void TensorPrinter::operator()(const std::vector<float>& values)
107 {
108  ForEachValue(values, [](float value)
109  {
110  printf("%f ", value);
111  });
112  WriteToFile(values);
113 }
114 
115 void TensorPrinter::operator()(const std::vector<uint8_t>& values)
116 {
117  if(m_DequantizeOutput)
118  {
119  auto& scale = m_Scale;
120  auto& offset = m_Offset;
121  std::vector<float> dequantizedValues;
122  ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
123  {
124  auto dequantizedValue = armnn::Dequantize(value, scale, offset);
125  printf("%f ", dequantizedValue);
126  dequantizedValues.push_back(dequantizedValue);
127  });
128  WriteToFile(dequantizedValues);
129  }
130  else
131  {
132  const std::vector<int> intValues(values.begin(), values.end());
133  operator()(intValues);
134  }
135 }
136 
137 void TensorPrinter::operator()(const std::vector<int8_t>& values)
138 {
139  ForEachValue(values, [](int8_t value)
140  {
141  printf("%d ", value);
142  });
143  WriteToFile(values);
144 }
145 
146 void TensorPrinter::operator()(const std::vector<int>& values)
147 {
148  ForEachValue(values, [](int value)
149  {
150  printf("%d ", value);
151  });
152  WriteToFile(values);
153 }
154 
155 template<typename Container, typename Delegate>
156 void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
157 {
158  std::cout << m_OutputBinding << ": ";
159  for (const auto& value : c)
160  {
161  delegate(value);
162  }
163  printf("\n");
164 }
165 
166 template<typename T>
167 void TensorPrinter::WriteToFile(const std::vector<T>& values)
168 {
169  if (!m_OutputTensorFile.empty())
170  {
171  std::ofstream outputTensorFile;
172  outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
173  if (outputTensorFile.is_open())
174  {
175  outputTensorFile << m_OutputBinding << ": ";
176  std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
177  }
178  else
179  {
180  ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
181  }
182  outputTensorFile.close();
183  }
184 }
185 
186 using TContainer =
187  mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
188 using QuantizationParams = std::pair<float, int32_t>;
189 
191  unsigned int numElements,
192  const std::string& dataTypeStr,
194  const armnn::Optional<std::string>& dataFile)
195 {
196  const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
197  const bool quantizeData = qParams.has_value();
198 
199  std::ifstream inputTensorFile;
200  if (readFromFile)
201  {
202  inputTensorFile = std::ifstream(dataFile.value());
203  }
204 
205  if (dataTypeStr.compare("float") == 0)
206  {
207  if (quantizeData)
208  {
209  const float qScale = qParams.value().first;
210  const int qOffset = qParams.value().second;
211 
212  tensorData = readFromFile ?
213  ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
214  GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
215  }
216  else
217  {
218  tensorData = readFromFile ?
219  ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
220  GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
221  }
222  }
223  else if (dataTypeStr.compare("int") == 0)
224  {
225  tensorData = readFromFile ?
226  ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
227  GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
228  }
229  else if (dataTypeStr.compare("qsymms8") == 0)
230  {
231  tensorData = readFromFile ?
232  ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
233  GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
234  }
235  else if (dataTypeStr.compare("qasymm8") == 0)
236  {
237  tensorData = readFromFile ?
238  ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
239  GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
240  }
241  else
242  {
243  std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
244  ARMNN_LOG(fatal) << errorMessage;
245 
246  inputTensorFile.close();
247  throw armnn::Exception(errorMessage);
248  }
249 
250  inputTensorFile.close();
251 }
252 
253 bool ValidatePath(const std::string& file, const bool expectFile)
254 {
255  if (!fs::exists(file))
256  {
257  std::cerr << "Given file path '" << file << "' does not exist" << std::endl;
258  return false;
259  }
260  if (!fs::is_regular_file(file) && expectFile)
261  {
262  std::cerr << "Given file path '" << file << "' is not a regular file" << std::endl;
263  return false;
264  }
265  return true;
266 }
267 
268 bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
269 {
270  bool allPathsValid = true;
271  for (auto const& file : fileVec)
272  {
273  if(!ValidatePath(file, expectFile))
274  {
275  allPathsValid = false;
276  }
277  }
278  return allPathsValid;
279 }
280 
281 
282 
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
Definition: TypesUtils.cpp:46
std::vector< unsigned int > ParseArray(std::istream &stream)
void operator()(const std::vector< float > &values)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
#define ARMNN_LOG(severity)
Definition: Logging.hpp:202
std::vector< T > GenerateDummyTensorData(unsigned int numElements)
void PopulateTensorWithData(TContainer &tensorData, unsigned int numElements, const std::string &dataTypeStr, const armnn::Optional< QuantizationParams > &qParams, const armnn::Optional< std::string > &dataFile)
std::string StringTrimCopy(const std::string &str, const std::string &chars = "\t\n\v\f\r ")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
Definition: StringUtils.hpp:85
auto ParseDataArray(std::istream &stream)
bool has_value() const noexcept
Definition: Optional.hpp:53
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies if a given vector of strings are valid paths.
TensorPrinter(const std::string &binding, const armnn::TensorInfo &info, const std::string &outputTensorFile, bool dequantizeOutput)
mapbox::util::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char >, std::vector< int8_t > > TContainer
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
bool ValidatePath(const std::string &file, const bool expectFile)
Verifies if the given string is a valid path.
std::pair< float, int32_t > QuantizationParams