12 #if defined(ARMNN_SERIALIZER) 15 #if defined(ARMNN_TF_LITE_PARSER) 18 #if defined(ARMNN_ONNX_PARSER) 22 template<armnn::DataType NonQuantizedType>
25 template<armnn::DataType QuantizedType>
27 const float& quantizationScale,
28 const int32_t& quantizationOffset);
31 auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
33 return ParseArrayImpl<float>(stream, [](
const std::string& s) {
return std::stof(s); });
37 auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
39 return ParseArrayImpl<int>(stream, [](
const std::string& s) {
return std::stoi(s); });
43 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
45 return ParseArrayImpl<int8_t>(stream,
50 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
52 return ParseArrayImpl<uint8_t>(stream,
58 auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
60 return ParseArrayImpl<int8_t>(stream,
65 auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
66 const float& quantizationScale,
67 const int32_t& quantizationOffset)
69 return ParseArrayImpl<int8_t>(stream,
70 [&quantizationScale, &quantizationOffset](
const std::string& s)
73 armnn::Quantize<int8_t>(std::stof(s),
80 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
81 const float& quantizationScale,
82 const int32_t& quantizationOffset)
84 return ParseArrayImpl<uint8_t>(stream,
85 [&quantizationScale, &quantizationOffset](
const std::string& s)
88 armnn::Quantize<uint8_t>(std::stof(s),
94 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
97 return std::vector<T>(numElements,
static_cast<T
>(0));
103 return ParseArrayImpl<unsigned int>(
108 std::vector<std::string>
ParseStringList(
const std::string& inputString,
const char* delimiter)
110 std::stringstream stream(inputString);
111 return ParseArrayImpl<std::string>(stream, [](
const std::string& s) {
118 const std::string& outputTensorFile,
119 bool dequantizeOutput,
120 const bool printToConsole)
121 : m_OutputBinding(binding)
122 , m_Scale(info.GetQuantizationScale())
123 , m_Offset(info.GetQuantizationOffset())
124 , m_OutputTensorFile(outputTensorFile)
125 , m_DequantizeOutput(dequantizeOutput)
126 , m_PrintToConsole(printToConsole) {}
130 if (m_PrintToConsole)
132 std::cout << m_OutputBinding <<
": ";
133 ForEachValue(values, [](
float value)
135 printf(
"%f ", value);
144 if(m_DequantizeOutput)
146 auto& scale = m_Scale;
147 auto& offset = m_Offset;
148 std::vector<float> dequantizedValues;
149 ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
152 dequantizedValues.push_back(dequantizedValue);
155 if (m_PrintToConsole)
157 std::cout << m_OutputBinding <<
": ";
158 ForEachValue(dequantizedValues, [](
float value)
160 printf(
"%f ", value);
165 WriteToFile(dequantizedValues);
169 const std::vector<int> intValues(values.begin(), values.end());
176 if (m_PrintToConsole)
178 std::cout << m_OutputBinding <<
": ";
179 ForEachValue(values, [](int8_t value)
181 printf(
"%d ", value);
190 if (m_PrintToConsole)
192 std::cout << m_OutputBinding <<
": ";
193 ForEachValue(values, [](
int value)
195 printf(
"%d ", value);
202 template<
typename Container,
typename Delegate>
203 void TensorPrinter::ForEachValue(
const Container& c, Delegate delegate)
205 for (
const auto& value : c)
212 void TensorPrinter::WriteToFile(
const std::vector<T>& values)
214 if (!m_OutputTensorFile.empty())
216 std::ofstream outputTensorFile;
217 outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
218 if (outputTensorFile.is_open())
220 outputTensorFile << m_OutputBinding <<
": ";
221 std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile,
" "));
225 ARMNN_LOG(info) <<
"Output Tensor File: " << m_OutputTensorFile <<
" could not be opened!";
227 outputTensorFile.close();
232 unsigned int numElements,
233 const std::string& dataTypeStr,
237 const bool readFromFile = dataFile.
has_value() && !dataFile.
value().empty();
238 const bool quantizeData = qParams.
has_value();
240 std::ifstream inputTensorFile;
243 inputTensorFile = std::ifstream(dataFile.
value());
246 if (dataTypeStr.compare(
"float") == 0)
250 const float qScale = qParams.
value().first;
251 const int qOffset = qParams.
value().second;
253 tensorData = readFromFile ?
254 ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
255 GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
259 tensorData = readFromFile ?
260 ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
261 GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
264 else if (dataTypeStr.compare(
"int") == 0)
266 tensorData = readFromFile ?
267 ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
268 GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
270 else if (dataTypeStr.compare(
"qsymms8") == 0)
272 tensorData = readFromFile ?
273 ParseDataArray<armnn::DataType::QSymmS8>(inputTensorFile) :
274 GenerateDummyTensorData<armnn::DataType::QSymmS8>(numElements);
276 else if (dataTypeStr.compare(
"qasymm8") == 0 || dataTypeStr.compare(
"qasymmu8") == 0)
278 tensorData = readFromFile ?
279 ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
280 GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
282 else if (dataTypeStr.compare(
"qasymms8") == 0)
284 tensorData = readFromFile ?
285 ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
286 GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
290 std::string errorMessage =
"Unsupported tensor data type " + dataTypeStr;
293 inputTensorFile.close();
297 inputTensorFile.close();
302 if (!fs::exists(file))
304 std::cerr <<
"Given file path '" << file <<
"' does not exist" << std::endl;
307 if (!fs::is_regular_file(file) && expectFile)
309 std::cerr <<
"Given file path '" << file <<
"' is not a regular file" << std::endl;
315 bool ValidatePaths(
const std::vector<std::string>& fileVec,
const bool expectFile)
317 bool allPathsValid =
true;
318 for (
auto const& file : fileVec)
322 allPathsValid =
false;
325 return allPathsValid;
float Dequantize(QuantizedType value, float scale, int32_t offset)
Dequantize an 8-bit data type into a floating point data type.
std::vector< unsigned int > ParseArray(std::istream &stream)
void operator()(const std::vector< float > &values)
std::vector< std::string > ParseStringList(const std::string &inputString, const char *delimiter)
Splits a given string at every occurrence of the delimiter into a vector of strings.
#define ARMNN_LOG(severity)
std::vector< T > GenerateDummyTensorData(unsigned int numElements)
std::string StringTrimCopy(const std::string &str, const std::string &chars="\t\n\v\f\r ")
Trim from both the start and the end of a string, returns a trimmed copy of the string.
void PopulateTensorWithData(armnnUtils::TContainer &tensorData, unsigned int numElements, const std::string &dataTypeStr, const armnn::Optional< QuantizationParams > &qParams, const armnn::Optional< std::string > &dataFile)
auto ParseDataArray(std::istream &stream)
bool has_value() const noexcept
bool ValidatePaths(const std::vector< std::string > &fileVec, const bool expectFile)
Verifies if a given vector of strings are valid paths.
TensorPrinter(const std::string &binding, const armnn::TensorInfo &info, const std::string &outputTensorFile, bool dequantizeOutput, bool printToConsole=true)
Base class for all ArmNN exceptions so that users can filter to just those.
mapbox::util::variant< std::vector< float >, std::vector< int >, std::vector< unsigned char >, std::vector< int8_t > > TContainer
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
bool ValidatePath(const std::string &file, const bool expectFile)
Verifies if the given string is a valid path.