From d7ed6d4e53a877a25fcea754d76c8831451f18f1 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Wed, 21 Jul 2021 09:42:43 +0100
Subject: GitHub #557 wrong result in int8 model

* Added support for qasymms8 (int8) to ImageTensorGenerator
* Added qasymmu8 as alias for qasymm8 in ImageTensorGenerator
* Added support for qasymms8 (int8) to ExecuteNetwork
* Added qasymmu8 as alias for qasymm8 in ExecuteNetwork
* Set tflite to be the default model format in ImageTensorGenerator, as it's
  the only supported model format.

Signed-off-by: Mike Kelly
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp            | 36 +++++++++++++++++++---
 .../ExecuteNetworkProgramOptions.cpp               |  6 ++--
 .../ImageTensorGenerator/ImageTensorGenerator.cpp  | 18 ++++++++---
 .../ImageTensorGenerator/ImageTensorGenerator.hpp  | 34 +++++++++++++++++++-
 .../NetworkExecutionUtils.cpp                      | 30 ++++++++++++++++--
 5 files changed, 109 insertions(+), 15 deletions(-)

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index bce83583cc..a9b5a3c3f4 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -155,7 +155,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
-        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
+        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
+                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
         {
             auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
 
@@ -175,6 +176,26 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
 
             std::copy(tensorData.begin(), tensorData.end(), inputData);
         }
+        else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
+        {
+            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);
+
+            if(inputData == NULL)
+            {
+                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
+                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
+                return EXIT_FAILURE;
+            }
+
+            std::vector<int8_t> tensorData;
+            PopulateTensorWithDataGeneric<int8_t>(tensorData,
+                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
+                                                  dataFile,
+                                                  [](const std::string& s)
+                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+
+            std::copy(tensorData.begin(), tensorData.end(), inputData);
+        }
         else
         {
             ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
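A note on the new qasymms8 input path above: values are parsed from text with std::stoi and then narrowed with armnn::numeric_cast, which range-checks the conversion rather than silently wrapping. A minimal standalone sketch of that narrowing behaviour, using a hypothetical CheckedInt8Cast stand-in (armnn::numeric_cast's exact failure behaviour may differ):

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Hypothetical stand-in for armnn::numeric_cast<int8_t>: reject out-of-range
    // values instead of silently wrapping them.
    int8_t CheckedInt8Cast(int value)
    {
        if (value < -128 || value > 127)
        {
            throw std::out_of_range("value does not fit in int8_t");
        }
        return static_cast<int8_t>(value);
    }

    int main()
    {
        // "-5" parses to -5 and fits; "300" would throw instead of wrapping to 44.
        std::cout << static_cast<int>(CheckedInt8Cast(std::stoi("-5"))) << "\n";
    }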
"; @@ -245,7 +266,8 @@ int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params, printf("%d ", tfLiteDelageOutputData[i]); } } - else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0) + else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 || + params.m_OutputTypes[outputIndex].compare("qasymmu8") == 0) { auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor(tfLiteDelegateOutputId); if(tfLiteDelageOutputData == NULL) @@ -374,13 +396,17 @@ int MainImpl(const ExecuteNetworkParams& params, if (params.m_OutputTypes[i].compare("float") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } else if (params.m_OutputTypes[i].compare("int") == 0) + } + else if (params.m_OutputTypes[i].compare("int") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } else if (params.m_OutputTypes[i].compare("qasymm8") == 0) + } + else if (params.m_OutputTypes[i].compare("qasymm8") == 0 || + params.m_OutputTypes[i].compare("qasymmu8") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); - } else if (params.m_OutputTypes[i].compare("qsymms8") == 0) + } + else if (params.m_OutputTypes[i].compare("qasymms8") == 0) { outputDataContainers.push_back(std::vector(model.GetOutputSize(i))); } else diff --git a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp index 25ddecf3ba..b12547f51c 100644 --- a/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp +++ b/tests/ExecuteNetwork/ExecuteNetworkProgramOptions.cpp @@ -232,7 +232,7 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork", cxxopts::value(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true")) ("q,quantize-input", - "If this option is enabled, all float inputs will be quantized to qasymm8. " + "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. " "If unset, default to not quantized. Accepted values (true or false)", cxxopts::value(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true")) @@ -264,13 +264,13 @@ ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork", ("y,input-type", "The type of the input tensors in the network separated by comma. " "If unset, defaults to \"float\" for all defined inputs. " - "Accepted values (float, int or qasymm8).", + "Accepted values (float, int, qasymms8 or qasymmu8).", cxxopts::value()) ("z,output-type", "The type of the output tensors in the network separated by comma. " "If unset, defaults to \"float\" for all defined outputs. " - "Accepted values (float, int or qasymm8).", + "Accepted values (float, int, qasymms8 or qasymmu8).", cxxopts::value()) ("T,tflite-executor", diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp index a2110f9cf3..b4432558c4 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp @@ -164,15 +164,16 @@ public: ("f,model-format", "Format of the intended model file that uses the images." "Different formats have different image normalization styles." + "If unset, defaults to tflite." "Accepted value (tflite)", - cxxopts::value(m_ModelFormat)) + cxxopts::value(m_ModelFormat)->default_value("tflite")) ("o,outfile", "Output raw tensor file path", cxxopts::value(m_OutputFileName)) ("z,output-type", "The data type of the output tensors." 
"If unset, defaults to \"float\" for all defined inputs. " - "Accepted values (float, int or qasymm8)", + "Accepted values (float, int, qasymms8 or qasymmu8)", cxxopts::value(m_OutputType)->default_value("float")) ("new-width", "Resize image to new width. Keep original width if unspecified", @@ -254,10 +255,14 @@ public: { return armnn::DataType::Signed32; } - else if (m_OutputType == "qasymm8") + else if (m_OutputType == "qasymm8" || m_OutputType == "qasymmu8") { return armnn::DataType::QAsymmU8; } + else if (m_OutputType == "qasymms8") + { + return armnn::DataType::QAsymmS8; + } else { throw armnn::Exception("Unsupported input type" + m_OutputType); @@ -292,7 +297,8 @@ int main(int argc, char* argv[]) const unsigned int batchSize = 1; const armnn::DataLayout outputLayout(cmdline.GetLayout()); - using TContainer = mapbox::util::variant, std::vector, std::vector>; + using TContainer = mapbox::util::variant, std::vector, std::vector, + std::vector>; std::vector imageDataContainers; const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType); try @@ -307,6 +313,10 @@ int main(int argc, char* argv[]) imageDataContainers.push_back(PrepareImageTensor( imagePath, newWidth, newHeight, normParams, batchSize, outputLayout)); break; + case armnn::DataType::QAsymmS8: + imageDataContainers.push_back(PrepareImageTensor( + imagePath, newWidth, newHeight, normParams, batchSize, outputLayout)); + break; case armnn::DataType::Float32: default: imageDataContainers.push_back(PrepareImageTensor( diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp index 5aa2ca8124..6d2e549360 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp @@ -56,6 +56,10 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode normParams.mean = { 128.0, 128.0, 128.0 }; break; case armnn::DataType::QAsymmU8: + break; + case armnn::DataType::QAsymmS8: + normParams.mean = { 128.0, 128.0, 128.0 }; + break; default: break; } @@ -138,7 +142,7 @@ std::vector PrepareImageTensor(const std::string& imagePath, return imageDataInt; } -// Prepare qasymm8 image tensor +// Prepare qasymmu8 image tensor template <> std::vector PrepareImageTensor(const std::string& imagePath, unsigned int newWidth, @@ -158,6 +162,26 @@ std::vector PrepareImageTensor(const std::string& imagePath, return imageDataQasymm8; } +// Prepare qasymms8 image tensor +template <> +std::vector PrepareImageTensor(const std::string& imagePath, + unsigned int newWidth, + unsigned int newHeight, + const NormalizationParameters& normParams, + unsigned int batchSize, + const armnn::DataLayout& outputLayout) +{ + // Get float32 image tensor + std::vector imageDataFloat = + PrepareImageTensor(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout); + std::vector imageDataQasymms8; + imageDataQasymms8.reserve(imageDataFloat.size()); + // Convert to uint8 image tensor with static cast + std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8), + [](float val) { return static_cast(val); }); + return imageDataQasymms8; +} + /** Write image tensor to ofstream * * @param[in] imageData Image tensor data @@ -176,3 +200,11 @@ void WriteImageTensorImpl(const std::vector& imageData, std::o { std::copy(imageData.begin(), imageData.end(), std::ostream_iterator(imageTensorFile, " ")); } + +// For int8_t image tensor, cast it to int before writing it to 
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
index 23b892ffb4..0906c1cf3f 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.cpp
@@ -39,6 +39,13 @@ auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
     return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
 }
 
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream)
+{
+    return ParseArrayImpl<int8_t>(stream,
+                                  [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
+}
+
 template<>
 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
 {
@@ -54,7 +61,20 @@ auto ParseDataArray<armnn::DataType::QSymmS8>(std::istream& stream)
                                   [](const std::string& s) { return armnn::numeric_cast<int8_t>(std::stoi(s)); });
 }
 
-
+template<>
+auto ParseDataArray<armnn::DataType::QAsymmS8>(std::istream& stream,
+                                               const float& quantizationScale,
+                                               const int32_t& quantizationOffset)
+{
+    return ParseArrayImpl<int8_t>(stream,
+                                  [&quantizationScale, &quantizationOffset](const std::string& s)
+                                  {
+                                      return armnn::numeric_cast<int8_t>(
+                                          armnn::Quantize<int8_t>(std::stof(s),
+                                                                  quantizationScale,
+                                                                  quantizationOffset));
+                                  });
+}
 
 template<>
 auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
@@ -232,12 +252,18 @@ void PopulateTensorWithData(TContainer& tensorData,
                      ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
                      GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
     }
-    else if (dataTypeStr.compare("qasymm8") == 0)
+    else if (dataTypeStr.compare("qasymm8") == 0 || dataTypeStr.compare("qasymmu8") == 0)
     {
         tensorData = readFromFile ?
                      ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
                      GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
    }
+    else if (dataTypeStr.compare("qasymms8") == 0)
+    {
+        tensorData = readFromFile ?
+                     ParseDataArray<armnn::DataType::QAsymmS8>(inputTensorFile) :
+                     GenerateDummyTensorData<armnn::DataType::QAsymmS8>(numElements);
+    }
     else
     {
         std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
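For context on the new ParseDataArray<armnn::DataType::QAsymmS8> overload taking a scale and offset: each float read from the input file is mapped onto the quantized int8 domain using the tensor's quantization parameters, then narrowed with armnn::numeric_cast. A rough standalone equivalent of that mapping, with QuantizeToInt8 as a hypothetical helper (armnn::Quantize's actual rounding and clamping rules may differ):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for armnn::Quantize<int8_t> followed by numeric_cast:
    // divide by scale, round, add the zero-point offset, clamp to the int8 range.
    int8_t QuantizeToInt8(float value, float scale, int32_t offset)
    {
        int32_t quantized = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<int8_t>(std::min(std::max(quantized, -128), 127));
    }

    int main()
    {
        // With scale 0.5 and zero point -10: 12.3f -> round(24.6) + (-10) = 15
        std::cout << static_cast<int>(QuantizeToInt8(12.3f, 0.5f, -10)) << "\n";
    }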