NetworkExecutionUtils.cpp (ArmNN 20.11)
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils.hpp"

#include <Filesystem.hpp>
#include <InferenceTest.hpp>
#include <ResolveType.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
// Parses textual numeric data from a stream into a typed vector. The quantized overload
// additionally quantizes each parsed float using the supplied scale and offset.
template<armnn::DataType NonQuantizedType>
auto ParseDataArray(std::istream& stream);

template<armnn::DataType QuantizedType>
auto ParseDataArray(std::istream& stream,
                    const float& quantizationScale,
                    const int32_t& quantizationOffset);

template<>
auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
{
    return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
}

template<>
auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
{
    return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
}

template<>
auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
{
    return ParseArrayImpl<uint8_t>(stream,
        [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
}

template<>
auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
                                               const float& quantizationScale,
                                               const int32_t& quantizationOffset)
{
    return ParseArrayImpl<uint8_t>(stream,
        [&quantizationScale, &quantizationOffset](const std::string& s)
        {
            return armnn::numeric_cast<uint8_t>(
                armnn::Quantize<uint8_t>(std::stof(s),
                                         quantizationScale,
                                         quantizationOffset));
        });
}
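
// Illustrative sketch (not part of the original file): reading raw float text data and
// quantizing it to QAsymmU8 on the fly. The file name and quantization parameters below
// are hypothetical placeholders.
//
//     std::ifstream rawValues("input0.txt");   // whitespace/comma separated float values
//     std::vector<uint8_t> quantized =
//         ParseDataArray<armnn::DataType::QAsymmU8>(rawValues, 0.1f, 128);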

// Returns a vector of numElements zero-initialised values of the requested ArmNN data type.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<T> GenerateDummyTensorData(unsigned int numElements)
{
    return std::vector<T>(numElements, static_cast<T>(0));
}


std::vector<unsigned int> ParseArray(std::istream& stream)
{
    return ParseArrayImpl<unsigned int>(
        stream,
        [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
}
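
// Illustrative sketch (not part of the original file): parsing a tensor shape string into
// dimension values. Assumes the default delimiter set used by ParseArrayImpl accepts commas;
// the shape string is a hypothetical placeholder.
//
//     std::stringstream shapeStream("1,224,224,3");
//     std::vector<unsigned int> shape = ParseArray(shapeStream);
//     // shape == { 1, 224, 224, 3 }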

// Splits a given string at every occurrence of delimiter into a vector of trimmed strings.
std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
{
    std::stringstream stream(inputString);
    return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
        return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
}
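
// Illustrative sketch (not part of the original file): splitting a comma separated list of
// layer binding names into individual, trimmed entries. The names are hypothetical.
//
//     std::vector<std::string> names = ParseStringList("input0, input1", ",");
//     // names == { "input0", "input1" }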


TensorPrinter::TensorPrinter(const std::string& binding,
                             const armnn::TensorInfo& info,
                             const std::string& outputTensorFile,
                             bool dequantizeOutput)
    : m_OutputBinding(binding)
    , m_Scale(info.GetQuantizationScale())
    , m_Offset(info.GetQuantizationOffset())
    , m_OutputTensorFile(outputTensorFile)
    , m_DequantizeOutput(dequantizeOutput) {}

void TensorPrinter::operator()(const std::vector<float>& values)
{
    ForEachValue(values, [](float value)
    {
        printf("%f ", value);
    });
    WriteToFile(values);
}

void TensorPrinter::operator()(const std::vector<uint8_t>& values)
{
    if (m_DequantizeOutput)
    {
        auto& scale = m_Scale;
        auto& offset = m_Offset;
        std::vector<float> dequantizedValues;
        ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
        {
            auto dequantizedValue = armnn::Dequantize(value, scale, offset);
            printf("%f ", dequantizedValue);
            dequantizedValues.push_back(dequantizedValue);
        });
        WriteToFile(dequantizedValues);
    }
    else
    {
        const std::vector<int> intValues(values.begin(), values.end());
        operator()(intValues);
    }
}

void TensorPrinter::operator()(const std::vector<int>& values)
{
    ForEachValue(values, [](int value)
    {
        printf("%d ", value);
    });
    WriteToFile(values);
}

template<typename Container, typename Delegate>
void TensorPrinter::ForEachValue(const Container& c, Delegate delegate)
{
    std::cout << m_OutputBinding << ": ";
    for (const auto& value : c)
    {
        delegate(value);
    }
    printf("\n");
}

template<typename T>
void TensorPrinter::WriteToFile(const std::vector<T>& values)
{
    if (!m_OutputTensorFile.empty())
    {
        std::ofstream outputTensorFile;
        outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
        if (outputTensorFile.is_open())
        {
            outputTensorFile << m_OutputBinding << ": ";
            std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
        }
        else
        {
            ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
        }
        outputTensorFile.close();
    }
}
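
// Illustrative sketch (not part of the original file): printing, and optionally writing to a
// file, the contents of an output tensor held in a variant output container (TContainer,
// defined below). The binding name, TensorInfo and container are hypothetical;
// mapbox::util::apply_visitor dispatches to the matching operator() overload above.
//
//     TensorPrinter printer("output0", outputTensorInfo, "output0.txt", /*dequantizeOutput=*/false);
//     mapbox::util::apply_visitor(printer, outputContainer);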

using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
using QuantizationParams = std::pair<float, int32_t>;

// Fills tensorData either from a data file or with zero-filled dummy data, optionally
// quantizing float input with the supplied quantization parameters.
void PopulateTensorWithData(TContainer& tensorData,
                            unsigned int numElements,
                            const std::string& dataTypeStr,
                            const armnn::Optional<QuantizationParams>& qParams,
                            const armnn::Optional<std::string>& dataFile)
{
    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
    const bool quantizeData = qParams.has_value();

    std::ifstream inputTensorFile;
    if (readFromFile)
    {
        inputTensorFile = std::ifstream(dataFile.value());
    }

    if (dataTypeStr.compare("float") == 0)
    {
        if (quantizeData)
        {
            const float qScale = qParams.value().first;
            const int qOffset = qParams.value().second;

            tensorData = readFromFile ?
                         ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
                         GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
        }
        else
        {
            tensorData = readFromFile ?
                         ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
                         GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
        }
    }
    else if (dataTypeStr.compare("int") == 0)
    {
        tensorData = readFromFile ?
                     ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
                     GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
    }
    else if (dataTypeStr.compare("qasymm8") == 0)
    {
        tensorData = readFromFile ?
                     ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
                     GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
    }
    else
    {
        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
        ARMNN_LOG(fatal) << errorMessage;

        inputTensorFile.close();
        throw armnn::Exception(errorMessage);
    }

    inputTensorFile.close();
}
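
// Illustrative sketch (not part of the original file): filling an input container with float
// data read from a hypothetical "input0.txt"; passing an empty optional for the data file
// would generate zero-filled dummy data instead.
//
//     TContainer input;
//     PopulateTensorWithData(input,
//                            4u,                                          // number of elements
//                            "float",
//                            armnn::EmptyOptional(),                      // no quantization parameters
//                            armnn::Optional<std::string>("input0.txt")); // hypothetical data file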

// Verifies that the given string is a valid path to an existing file or directory.
bool ValidatePath(const std::string& file, const bool expectFile)
{
    if (!fs::exists(file))
    {
        std::cerr << "Given file path '" << file << "' does not exist" << std::endl;
        return false;
    }
    if (!fs::is_regular_file(file) && expectFile)
    {
        std::cerr << "Given file path '" << file << "' is not a regular file" << std::endl;
        return false;
    }
    return true;
}

// Verifies every path in the given vector; returns false if any path fails validation.
bool ValidatePaths(const std::vector<std::string>& fileVec, const bool expectFile)
{
    bool allPathsValid = true;
    for (auto const& file : fileVec)
    {
        if (!ValidatePath(file, expectFile))
        {
            allPathsValid = false;
        }
    }
    return allPathsValid;
}
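
// Illustrative sketch (not part of the original file): checking a set of user supplied paths
// before attempting to load them. The file names are hypothetical.
//
//     const std::vector<std::string> files = { "model.tflite", "input0.txt" };
//     if (!ValidatePaths(files, /*expectFile=*/true))
//     {
//         ARMNN_LOG(fatal) << "One or more input paths are invalid";
//     }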