From a723ec5d2ac35948efb5dfd0c121a1a89cb977b7 Mon Sep 17 00:00:00 2001
From: FinnWilliamsArm
Date: Wed, 22 May 2019 14:50:55 +0100
Subject: IVGCVSW-3129 Image pre-processing fix for TFLite

 * Resized images for quantized models are now statically cast to uint8
   instead of being re-quantized (see the sketch below)
 * Removed the optional quantization parameters from the
   ImagePreprocessor constructor
 * Changed mean and scale for TFLite models
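
The core of the change, condensed from the diff below (the names and
types are those used in tests/ImagePreprocessor.cpp):

    // Before: resized float pixels were re-quantized with the input
    // binding's quantization parameters
    quantized[i] = armnn::Quantize<uint8_t>(resized[i], m_Scale, m_Offset);

    // After: quantized test cases pass scale = 1 with zero mean and
    // unit stddev, so each resized pixel is still in [0, 255] and a
    // direct cast preserves its value
    quantized[i] = static_cast<uint8_t>(resized[i]);

Float TFLite models normalize through the new scale parameter instead
of a hard-coded 255.0f, i.e. output = ((pixel / scale) - mean) / stddev;
MnasNet and ResNetV2 now pass scale = 127.5f and a per-channel mean of
0.5f.
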
Signed-off-by: FinnWilliamsArm
Change-Id: Id5ffdf77f3614d10c417e769bd8ffc4a4c07308b
---
 tests/ImagePreprocessor.cpp                                     | 7 +++----
 tests/ImagePreprocessor.hpp                                     | 5 +----
 tests/InferenceTestImage.cpp                                    | 8 +++++---
 tests/InferenceTestImage.hpp                                    | 3 ++-
 tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp               | 3 +--
 .../TfLiteInceptionV3Quantized-Armnn.cpp                        | 4 +---
 .../TfLiteInceptionV4Quantized-Armnn.cpp                        | 4 +---
 tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp               | 4 +++-
 .../TfLiteMobileNetQuantizedSoftmax-Armnn.cpp                   | 4 +---
 .../TfLiteMobilenetQuantized-Armnn.cpp                          | 4 +---
 .../TfLiteMobilenetV2Quantized-Armnn.cpp                        | 4 +---
 .../TfLiteResNetV2-50-Quantized-Armnn.cpp                       | 4 +---
 tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp             | 5 +++--
 tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp | 4 +---
 14 files changed, 25 insertions(+), 38 deletions(-)

diff --git a/tests/ImagePreprocessor.cpp b/tests/ImagePreprocessor.cpp
index 0ef0fda4f9..74bc943ee8 100644
--- a/tests/ImagePreprocessor.cpp
+++ b/tests/ImagePreprocessor.cpp
@@ -31,7 +31,7 @@ unsigned int ImagePreprocessor<TDataType>::GetLabelAndResizedImageAsFloat(unsign
 
     result = image.Resize(m_Width, m_Height, CHECK_LOCATION(),
                           InferenceTestImage::ResizingMethods::BilinearAndNormalized,
-                          m_Mean, m_Stddev);
+                          m_Mean, m_Stddev, m_Scale);
 
     // duplicate data across the batch
     for (unsigned int i = 1; i < m_BatchSize; i++)
@@ -72,9 +72,8 @@ ImagePreprocessor<TDataType>::GetTestCaseData(unsigned int testCaseId)
 
     for (size_t i=0; i<resized.size(); ++i)
     {
-        quantized[i] = armnn::Quantize<uint8_t>(resized[i],
-                                                m_Scale,
-                                                m_Offset);
+        quantized[i] = static_cast<uint8_t>(resized[i]);
     }
+
     return std::make_unique<TTestCaseData>(label, std::move(quantized));
 }
diff --git a/tests/ImagePreprocessor.hpp b/tests/ImagePreprocessor.hpp
index d77113c6d9..cd586818c2 100644
--- a/tests/ImagePreprocessor.hpp
+++ b/tests/ImagePreprocessor.hpp
@@ -33,8 +33,7 @@ public:
                       unsigned int width,
                       unsigned int height,
                       const std::vector<ImageSet>& imageSet,
-                      float scale=1.0,
-                      int32_t offset=0,
+                      float scale=255.0f,
                       const std::array<float, 3> mean={{0, 0, 0}},
                       const std::array<float, 3> stddev={{1, 1, 1}},
                       DataFormat dataFormat=DataFormat::NHWC,
@@ -44,7 +43,6 @@
         , m_Width(width)
         , m_BatchSize(batchSize)
         , m_Scale(scale)
-        , m_Offset(offset)
         , m_ImageSet(imageSet)
         , m_Mean(mean)
         , m_Stddev(stddev)
@@ -66,7 +64,6 @@ private:
     unsigned int m_BatchSize;
     // Quantization parameters
     float m_Scale;
-    int32_t m_Offset;
 
     const std::vector<ImageSet> m_ImageSet;
     const std::array<float, 3> m_Mean;
diff --git a/tests/InferenceTestImage.cpp b/tests/InferenceTestImage.cpp
index b011e6ac8f..92c67ae225 100644
--- a/tests/InferenceTestImage.cpp
+++ b/tests/InferenceTestImage.cpp
@@ -55,6 +55,7 @@ inline void PutData(std::vector<float> & data,
 std::vector<float> ResizeBilinearAndNormalize(const InferenceTestImage & image,
                                               const unsigned int outputWidth,
                                               const unsigned int outputHeight,
+                                              const float scale,
                                               const std::array<float, 3>& mean,
                                               const std::array<float, 3>& stddev)
 {
@@ -114,7 +115,7 @@
                 const float ly0 = Lerp(float(rgb_x0y0[c]), float(rgb_x1y0[c]), xw);
                 const float ly1 = Lerp(float(rgb_x0y1[c]), float(rgb_x1y1[c]), xw);
                 const float l = Lerp(ly0, ly1, yw);
-                PutData(out, outputWidth, x, y, c, ((l/255.0f) - mean[c])/stddev[c]);
+                PutData(out, outputWidth, x, y, c, ((l / scale) - mean[c]) / stddev[c]);
             }
         }
     }
@@ -210,7 +211,8 @@ std::vector<float> InferenceTestImage::Resize(unsigned int newWidth,
                                               const armnn::CheckLocation& location,
                                               const ResizingMethods meth,
                                               const std::array<float, 3>& mean,
-                                              const std::array<float, 3>& stddev)
+                                              const std::array<float, 3>& stddev,
+                                              const float scale)
 {
     std::vector<float> out;
     if (newWidth == 0 || newHeight == 0)
@@ -227,7 +229,7 @@
         }
         case ResizingMethods::BilinearAndNormalized:
         {
-            out = ResizeBilinearAndNormalize(*this, newWidth, newHeight, mean, stddev);
+            out = ResizeBilinearAndNormalize(*this, newWidth, newHeight, scale, mean, stddev);
             break;
         }
         default:
diff --git a/tests/InferenceTestImage.hpp b/tests/InferenceTestImage.hpp
index 59f4cc7073..643d060919 100644
--- a/tests/InferenceTestImage.hpp
+++ b/tests/InferenceTestImage.hpp
@@ -92,7 +92,8 @@ public:
                               const armnn::CheckLocation& location,
                               const ResizingMethods meth = ResizingMethods::STB,
                               const std::array<float, 3>& mean = {{0.0, 0.0, 0.0}},
-                              const std::array<float, 3>& stddev = {{1.0, 1.0, 1.0}});
+                              const std::array<float, 3>& stddev = {{1.0, 1.0, 1.0}},
+                              const float scale = 255.0f);
 
     void Write(WriteFormat format, const char* filePath) const;
 
diff --git a/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp b/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
index 79ee49e595..8786feacf9 100644
--- a/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
+++ b/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
@@ -40,8 +40,7 @@ int main(int argc, char* argv[])
                     224,
                     224,
                     imageSet,
-                    1.0,                             // scale
-                    0,                               // offset
+                    255.0,                           // scale
                     {{0.485f, 0.456f, 0.406f}},      // mean
                     {{0.229f, 0.224f, 0.225f}},      // stddev
                     DatabaseType::DataFormat::NCHW); // format
diff --git a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
index 4fa0e140f1..bf5a865ac8 100644
--- a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     299,
                     299,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset());
+                    1);
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
index bcb9db8a62..b0af830ba7 100644
--- a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     299,
                     299,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset());
+                    1);
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
index c676cd7355..4cf16d78cc 100644
--- a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
+++ b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
@@ -42,7 +42,9 @@ int main(int argc, char* argv[])
                     dataDir,
                     224,
                     224,
-                    imageSet);
+                    imageSet,
+                    127.5f,
+                    {0.5f,0.5f,0.5f});
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
index 4d99e9ec86..70828498df 100644
--- a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
+++ b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     128,
                     128,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset(),
+                    1,
                     {{0, 0, 0}},
                     {{1, 1, 1}},
                     DatabaseType::DataFormat::NCHW,
diff --git a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
index 220964d061..1b411f9b94 100644
--- a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
@@ -108,14 +108,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     224,
                     224,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset());
+                    1);
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
index 5db5c243b1..9bc1034fea 100644
--- a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     224,
                     224,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset());
+                    1);
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
index 48e63211a3..98235e3f8a 100644
--- a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
+++ b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     224,
                     224,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset());
+                    1);
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
index 36fc72cdf4..1e2ffbf568 100644
--- a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
+++ b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
@@ -28,7 +28,6 @@ int main(int argc, char* argv[])
         using DatabaseType = ImagePreprocessor<float>;
         using ParserType = armnnTfLiteParser::ITfLiteParser;
         using ModelType = InferenceModel<ParserType, float>;
-
         // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
         retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
                                                           ParserType>(
@@ -43,7 +42,9 @@
                     dataDir,
                     299,
                     299,
-                    imageSet);
+                    imageSet,
+                    127.5f,
+                    {0.5f,0.5f,0.5f});
             },
             &inputTensorShape);
     }
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index 0ba1e5dc00..2084d2d656 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -42,14 +42,12 @@ int main(int argc, char* argv[])
             [&imageSet](const char* dataDir, const ModelType & model) {
                 // we need to get the input quantization parameters from
                 // the parsed model
-                auto inputBinding = model.GetInputBindingInfo();
                 return DatabaseType(
                     dataDir,
                     224,
                     224,
                     imageSet,
-                    inputBinding.second.GetQuantizationScale(),
-                    inputBinding.second.GetQuantizationOffset(),
+                    1,
                     {{0, 0, 0}},
                     {{1, 1, 1}},
                     DatabaseType::DataFormat::NCHW,
--
cgit v1.2.1