// NOTE(review): garbled Doxygen extraction — original source line numbers ("12",
// "15", ...) are fused into the text below. These lines are the conditional-include
// guards for the optional ArmNN serializer / TfLite parser / ONNX parser, followed
// by the primary template declarations of ParseDataArray: one overload for
// non-quantized element types, and one taking quantization parameters.
12 #if defined(ARMNN_SERIALIZER) 15 #if defined(ARMNN_TF_LITE_PARSER) 18 #if defined(ARMNN_ONNX_PARSER) 22 template<armnn::DataType NonQuantizedType>
25 template<armnn::DataType QuantizedType>
// Continuation of the quantized-overload declaration: scale/offset are forwarded
// to armnn::Quantize when converting parsed floats to the quantized type.
27 const float& quantizationScale,
28 const int32_t& quantizationOffset);
31 auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
33 return ParseArrayImpl<float>(stream, [](
const std::string& s) {
return std::stof(s); });
37 auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
39 return ParseArrayImpl<int>(stream, [](
const std::string& s) {
return std::stoi(s); });
// Specialization: parses QAsymmS8 (int8) values directly from text. The
// conversion lambda (original lines 46-48) is missing from this extraction —
// presumably std::stoi plus a narrowing cast to int8_t; TODO confirm upstream.
43 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
45 return ParseArrayImpl<int8_t>(stream,
// Specialization: parses QAsymmU8 (uint8) values directly from text. The
// conversion lambda (original lines 53-56) is missing from this extraction —
// presumably std::stoi plus a narrowing cast to uint8_t; TODO confirm upstream.
50 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
52 return ParseArrayImpl<uint8_t>(stream,
// Specialization: parses QSymmS8 (symmetric int8) values directly from text. The
// conversion lambda (original lines 61-63) is missing from this extraction —
// presumably std::stoi plus a narrowing cast to int8_t; TODO confirm upstream.
58 auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
60 return ParseArrayImpl<int8_t>(stream,
// Specialization: parses floating-point text and quantizes each value to
// QAsymmS8 via armnn::Quantize, using the supplied scale/offset (captured by
// reference in the conversion lambda).
65 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
66 const float& quantizationScale,
67 const int32_t& quantizationOffset)
69 return ParseArrayImpl<int8_t>(stream,
70 [&quantizationScale, &quantizationOffset](
const std::string& s)
// Each token goes through std::stof then armnn::Quantize<int8_t>; the trailing
// Quantize arguments (scale, offset) were lost in this extraction.
73 armnn::Quantize<int8_t>(std::stof(s),
// Specialization: parses floating-point text and quantizes each value to
// QAsymmU8 via armnn::Quantize, using the supplied scale/offset (captured by
// reference in the conversion lambda).
80 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
81 const float& quantizationScale,
82 const int32_t& quantizationOffset)
84 return ParseArrayImpl<uint8_t>(stream,
85 [&quantizationScale, &quantizationOffset](
const std::string& s)
// Each token goes through std::stof then armnn::Quantize<uint8_t>; the trailing
// Quantize arguments (scale, offset) were lost in this extraction.
88 armnn::Quantize<uint8_t>(std::stof(s),
94 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
97 return std::vector<T>(numElements,
static_cast<T
>(0));
103 return ParseArrayImpl<unsigned int>(
// Splits 'inputString' at every occurrence of 'delimiter' into a vector of
// strings. Tokens appear to be trimmed via armnn's StringTrimCopy (per the
// tooltip); the lambda body and the trailing 'delimiter' argument to
// ParseArrayImpl (original lines 112-114) are missing from this extraction.
108 std::vector<std::string>
ParseStringList(
const std::string& inputString,
const char* delimiter)
110 std::stringstream stream(inputString);
111 return ParseArrayImpl<std::string>(stream, [](
const std::string& s) {
// TensorPrinter constructor. The start of the signature (binding name and
// armnn::TensorInfo parameters, original lines 116-117) is missing from this
// extraction; the full form per the tooltip is
// TensorPrinter(binding, info, outputTensorFile, dequantizeOutput).
// Caches the binding name, the quantization scale/offset taken from the
// TensorInfo, the optional output file path, and the dequantize flag.
118 const std::string& outputTensorFile,
119 bool dequantizeOutput)
120 : m_OutputBinding(binding)
121 , m_Scale(info.GetQuantizationScale())
122 , m_Offset(info.GetQuantizationOffset())
123 , m_OutputTensorFile(outputTensorFile)
124 , m_DequantizeOutput(dequantizeOutput) {}
// operator()(const std::vector<float>&): prints each float to stdout via
// ForEachValue. The signature line and the epilogue (presumably
// WriteToFile(values), original line 132) are missing from this extraction.
128 ForEachValue(values, [](
float value)
130 printf(
"%f ", value);
// uint8_t overload of operator(): when dequantization was requested, each
// quantized value is converted back to float (the armnn::Dequantize call with
// the cached scale/offset, original lines 143-144, is missing from this
// extraction), printed, collected, and written to file. Otherwise the bytes are
// widened to int and handling falls through to the int overload (line 152).
137 if(m_DequantizeOutput)
139 auto& scale = m_Scale;
140 auto& offset = m_Offset;
141 std::vector<float> dequantizedValues;
142 ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
// Lambda body start is missing; it computes 'dequantizedValue' used below.
145 printf(
"%f ", dequantizedValue);
146 dequantizedValues.push_back(dequantizedValue);
148 WriteToFile(dequantizedValues);
// else branch: widen uint8_t values to int and delegate to operator()(vector<int>).
152 const std::vector<int> intValues(values.begin(), values.end());
// int8_t overload of operator(): prints each value as a signed decimal via
// ForEachValue. Signature and epilogue are missing from this extraction.
159 ForEachValue(values, [](int8_t value)
161 printf(
"%d ", value);
// int overload of operator(): prints each value as a decimal via ForEachValue.
// Signature and epilogue are missing from this extraction.
168 ForEachValue(values, [](
int value)
170 printf(
"%d ", value);
// Generic iteration helper: prints the bound output name as a "name: " prefix,
// then iterates the container. The loop body (which presumably invokes
// 'delegate' per element, original lines 180-184) is missing from this
// extraction.
175 template<
typename Container,
typename Delegate>
176 void TensorPrinter::ForEachValue(
const Container& c, Delegate delegate)
178 std::cout << m_OutputBinding <<
": ";
179 for (
const auto& value : c)
187 void TensorPrinter::WriteToFile(
const std::vector<T>& values)
189 if (!m_OutputTensorFile.empty())
191 std::ofstream outputTensorFile;
192 outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
193 if (outputTensorFile.is_open())
195 outputTensorFile << m_OutputBinding <<
": ";
196 std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile,
" "));
200 ARMNN_LOG(info) <<
"Output Tensor File: " << m_OutputTensorFile <<
" could not be opened!";
202 outputTensorFile.close();
207 mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
// PopulateTensorWithData: fills 'tensorData' either by parsing an optional data
// file or by generating 'numElements' dummy zeros, dispatching on the data-type
// string. The leading parameters (tensorData, and the trailing qParams/dataFile
// Optionals, original lines 209-215) are missing from this extraction.
211 unsigned int numElements,
212 const std::string& dataTypeStr,
// Read from file only when a non-empty path was supplied.
216 const bool readFromFile = dataFile.
has_value() && !dataFile.
value().empty();
217 const bool quantizeData = qParams.
has_value();
219 std::ifstream inputTensorFile;
// Guarded open (the enclosing 'if (readFromFile)' is missing from extraction).
222 inputTensorFile = std::ifstream(dataFile.
value());
225 if (dataTypeStr.compare(
"float") == 0)
// Quantized-float path (the nested 'if (quantizeData)' is missing from
// extraction): floats are parsed and quantized into QAsymmU8 on the fly.
229 const float qScale = qParams.
value().first;
230 const int qOffset = qParams.
value().second;
232 tensorData = readFromFile ?
233 ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
234 GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
// Plain float path (the 'else' is missing from extraction).
238 tensorData = readFromFile ?
239 ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
240 GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
243 else if (dataTypeStr.compare(
"int") == 0)
245 tensorData = readFromFile ?
246 ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
247 GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
249 else if (dataTypeStr.compare(
"qsymms8") == 0)
251 tensorData = readFromFile ?
252 ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
253 GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
// Both historical ("qasymm8") and current ("qasymmu8") spellings are accepted.
255 else if (dataTypeStr.compare(
"qasymm8") == 0 || dataTypeStr.compare(
"qasymmu8") == 0)
257 tensorData = readFromFile ?
258 ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
259 GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
261 else if (dataTypeStr.compare(
"qasymms8") == 0)
263 tensorData = readFromFile ?
264 ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
265 GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
// Unsupported type: the error is built here; the log/throw statements
// (original lines 270-275) are missing from this extraction.
269 std::string errorMessage =
"Unsupported tensor data type " + dataTypeStr;
// The file is closed on both the error path and the normal path.
272 inputTensorFile.close();
276 inputTensorFile.close();
281 if (!fs::exists(file))
283 std::cerr <<
"Given file path '" << file <<
"' does not exist" << std::endl;
286 if (!fs::is_regular_file(file) && expectFile)
288 std::cerr <<
"Given file path '" << file <<
"' is not a regular file" << std::endl;
294 bool ValidatePaths(
const std::vector<std::string>& fileVec,
const bool expectFile)
296 bool allPathsValid =
true;
297 for (
auto const& file : fileVec)
301 allPathsValid =
false;
304 return allPathsValid;
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
std::vector< unsigned int > ParseArray(std::istream &stream)
void operator()(const std::vector< float > &values)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
#define ARMNN_LOG(severity)
std::vector< T > GenerateDummyTensorData(unsigned int numElements)
void PopulateTensorWithData(TContainer &tensorData, unsigned int numElements, const std::string &dataTypeStr, const armnn::Optional< QuantizationParams > &qParams, const armnn::Optional< std::string > &dataFile)
std::string StringTrimCopy(const std::string &str, const std::string &chars="\t\n\v\f\r ")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
auto ParseDataArray(std::istream &stream)
bool has_value() const noexcept
mapbox::util::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char >, std::vector< int8_t > > TContainer
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies if a given vector of strings are valid paths.
TensorPrinter(const std::string &binding, const armnn::TensorInfo &info, const std::string &outputTensorFile, bool dequantizeOutput)
Base class for all ArmNN exceptions so that users can filter to just those.
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
bool ValidatePath(const std::string &file, const bool expectFile)
Verifies if the given string is a valid path.
std::pair< float, int32_t > QuantizationParams