diff options
author | Mike Kelly <mike.kelly@arm.com> | 2021-07-21 09:42:43 +0100 |
---|---|---|
committer | mike.kelly <mike.kelly@arm.com> | 2021-07-26 08:56:53 +0000 |
commit | d7ed6d4e53a877a25fcea754d76c8831451f18f1 (patch) | |
tree | 74edb3b7cdc991232bb8f8577ae2fd89dfc95b0a /tests/ImageTensorGenerator | |
parent | 4adf0de1f2380c215b7d6f643afe04ef4366df1e (diff) | |
download | armnn-d7ed6d4e53a877a25fcea754d76c8831451f18f1.tar.gz |
GitHub #557: wrong result in int8 model (branch: experimental/CustomAllocator3)
* Added support for qasymms8 (int8) to ImageTensorGenerator
* Added qasymmu8 as alias for qasymm8 in ImageTensorGenerator
* Added support for qasymms8 (int8) to ExecuteNetwork
* Added qasymmu8 as alias for qasymm8 in ExecuteNetwork
* Set tflite to be the default model format in ImageTensorGenerator as
it's the only supported model format.
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ieda7b78e668ea390e3565cd65a41fe0a9c8a5b83
Diffstat (limited to 'tests/ImageTensorGenerator')
-rw-r--r-- | tests/ImageTensorGenerator/ImageTensorGenerator.cpp | 18
-rw-r--r-- | tests/ImageTensorGenerator/ImageTensorGenerator.hpp | 34
2 files changed, 47 insertions(+), 5 deletions(-)
diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp index a2110f9cf3..b4432558c4 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.cpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.cpp @@ -164,15 +164,16 @@ public: ("f,model-format", "Format of the intended model file that uses the images." "Different formats have different image normalization styles." + "If unset, defaults to tflite." "Accepted value (tflite)", - cxxopts::value<std::string>(m_ModelFormat)) + cxxopts::value<std::string>(m_ModelFormat)->default_value("tflite")) ("o,outfile", "Output raw tensor file path", cxxopts::value<std::string>(m_OutputFileName)) ("z,output-type", "The data type of the output tensors." "If unset, defaults to \"float\" for all defined inputs. " - "Accepted values (float, int or qasymm8)", + "Accepted values (float, int, qasymms8 or qasymmu8)", cxxopts::value<std::string>(m_OutputType)->default_value("float")) ("new-width", "Resize image to new width. 
Keep original width if unspecified", @@ -254,10 +255,14 @@ public: { return armnn::DataType::Signed32; } - else if (m_OutputType == "qasymm8") + else if (m_OutputType == "qasymm8" || m_OutputType == "qasymmu8") { return armnn::DataType::QAsymmU8; } + else if (m_OutputType == "qasymms8") + { + return armnn::DataType::QAsymmS8; + } else { throw armnn::Exception("Unsupported input type" + m_OutputType); @@ -292,7 +297,8 @@ int main(int argc, char* argv[]) const unsigned int batchSize = 1; const armnn::DataLayout outputLayout(cmdline.GetLayout()); - using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>>; + using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<uint8_t>, + std::vector<int8_t>>; std::vector<TContainer> imageDataContainers; const NormalizationParameters& normParams = GetNormalizationParameters(modelFormat, outputType); try @@ -307,6 +313,10 @@ int main(int argc, char* argv[]) imageDataContainers.push_back(PrepareImageTensor<uint8_t>( imagePath, newWidth, newHeight, normParams, batchSize, outputLayout)); break; + case armnn::DataType::QAsymmS8: + imageDataContainers.push_back(PrepareImageTensor<int8_t>( + imagePath, newWidth, newHeight, normParams, batchSize, outputLayout)); + break; case armnn::DataType::Float32: default: imageDataContainers.push_back(PrepareImageTensor<float>( diff --git a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp index 5aa2ca8124..6d2e549360 100644 --- a/tests/ImageTensorGenerator/ImageTensorGenerator.hpp +++ b/tests/ImageTensorGenerator/ImageTensorGenerator.hpp @@ -56,6 +56,10 @@ NormalizationParameters GetNormalizationParameters(const SupportedFrontend& mode normParams.mean = { 128.0, 128.0, 128.0 }; break; case armnn::DataType::QAsymmU8: + break; + case armnn::DataType::QAsymmS8: + normParams.mean = { 128.0, 128.0, 128.0 }; + break; default: break; } @@ -138,7 +142,7 @@ std::vector<int> 
PrepareImageTensor<int>(const std::string& imagePath, return imageDataInt; } -// Prepare qasymm8 image tensor +// Prepare qasymmu8 image tensor template <> std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath, unsigned int newWidth, @@ -158,6 +162,26 @@ std::vector<uint8_t> PrepareImageTensor<uint8_t>(const std::string& imagePath, return imageDataQasymm8; } +// Prepare qasymms8 image tensor +template <> +std::vector<int8_t> PrepareImageTensor<int8_t>(const std::string& imagePath, + unsigned int newWidth, + unsigned int newHeight, + const NormalizationParameters& normParams, + unsigned int batchSize, + const armnn::DataLayout& outputLayout) +{ + // Get float32 image tensor + std::vector<float> imageDataFloat = + PrepareImageTensor<float>(imagePath, newWidth, newHeight, normParams, batchSize, outputLayout); + std::vector<int8_t> imageDataQasymms8; + imageDataQasymms8.reserve(imageDataFloat.size()); + // Convert to uint8 image tensor with static cast + std::transform(imageDataFloat.begin(), imageDataFloat.end(), std::back_inserter(imageDataQasymms8), + [](float val) { return static_cast<uint8_t>(val); }); + return imageDataQasymms8; +} + /** Write image tensor to ofstream * * @param[in] imageData Image tensor data @@ -176,3 +200,11 @@ void WriteImageTensorImpl<uint8_t>(const std::vector<uint8_t>& imageData, std::o { std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " ")); } + +// For int8_t image tensor, cast it to int before writing it to prevent writing data as characters instead of +// numerical values +template <> +void WriteImageTensorImpl<int8_t>(const std::vector<int8_t>& imageData, std::ofstream& imageTensorFile) +{ + std::copy(imageData.begin(), imageData.end(), std::ostream_iterator<int>(imageTensorFile, " ")); +}
\ No newline at end of file |