author     FinnWilliamsArm <Finn.Williams@arm.com>              2019-05-22 14:50:55 +0100
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>  2019-05-28 09:59:44 +0100
commit     a723ec5d2ac35948efb5dfd0c121a1a89cb977b7 (patch)
tree       e0a101fbb7ba41cd50e30a1e0ce31529404175ed
parent     7f2c35a82ec11be50b3478bd15207320bbf3bd57 (diff)
download   armnn-a723ec5d2ac35948efb5dfd0c121a1a89cb977b7.tar.gz
IVGCVSW-3129 Image pre-processing fix for TFLite (v19.05)
* Resized images for quantized models are now statically cast to uint8 instead of quantized
* Removed optional quantization parameters from ImagePreprocessor constructor
* Changed mean and scale for TFLite models

Signed-off-by: FinnWilliamsArm <Finn.Williams@arm.com>
Change-Id: Id5ffdf77f3614d10c417e769bd8ffc4a4c07308b
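The core of the change is visible in tests/ImagePreprocessor.cpp below: resized pixel values for quantized models are no longer passed through armnn::Quantize with the model's input scale and offset, but cast directly to uint8. A minimal, self-contained sketch of the two paths; QuantizeU8 is a simplified stand-in for armnn::Quantize<uint8_t> (not its real signature), and the scale/offset fed to it are hypothetical placeholders for what GetInputBindingInfo() used to supply:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Simplified stand-in for armnn::Quantize<uint8_t>:
    // q = round(value / scale) + offset, clamped to the uint8 range.
    uint8_t QuantizeU8(float value, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }

    int main()
    {
        const float pixel = 200.0f; // a resized pixel value

        // Old path: Resize() divided by a hard-coded 255, then the [0, 1]
        // value was re-quantized with model-derived parameters (the
        // scale/offset below are hypothetical placeholders).
        const uint8_t oldStyle = QuantizeU8(pixel / 255.0f, 0.0078125f, 128); // 228 here

        // New path: Resize() runs with scale = 1, so the value stays in
        // [0, 255] and a plain narrowing cast is enough.
        const uint8_t newStyle = static_cast<uint8_t>(pixel); // 200

        (void)oldStyle;
        (void)newStyle;
        return 0;
    }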
-rw-r--r--  tests/ImagePreprocessor.cpp                                                           | 7
-rw-r--r--  tests/ImagePreprocessor.hpp                                                           | 5
-rw-r--r--  tests/InferenceTestImage.cpp                                                          | 8
-rw-r--r--  tests/InferenceTestImage.hpp                                                         | 3
-rw-r--r--  tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp                                     | 3
-rw-r--r--  tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp           | 4
-rw-r--r--  tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp           | 4
-rw-r--r--  tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp                                     | 4
-rw-r--r--  tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp | 4
-rw-r--r--  tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp               | 4
-rw-r--r--  tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp           | 4
-rw-r--r--  tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp         | 4
-rw-r--r--  tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp                                   | 5
-rw-r--r--  tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp                       | 4
14 files changed, 25 insertions, 38 deletions
diff --git a/tests/ImagePreprocessor.cpp b/tests/ImagePreprocessor.cpp
index 0ef0fda4f9..74bc943ee8 100644
--- a/tests/ImagePreprocessor.cpp
+++ b/tests/ImagePreprocessor.cpp
@@ -31,7 +31,7 @@ unsigned int ImagePreprocessor<TDataType>::GetLabelAndResizedImageAsFloat(unsign
result = image.Resize(m_Width, m_Height, CHECK_LOCATION(),
InferenceTestImage::ResizingMethods::BilinearAndNormalized,
- m_Mean, m_Stddev);
+ m_Mean, m_Stddev, m_Scale);
// duplicate data across the batch
for (unsigned int i = 1; i < m_BatchSize; i++)
@@ -72,9 +72,8 @@ ImagePreprocessor<uint8_t>::GetTestCaseData(unsigned int testCaseId)
for (size_t i=0; i<resizedSize; ++i)
{
- quantized[i] = armnn::Quantize<uint8_t>(resized[i],
- m_Scale,
- m_Offset);
+ quantized[i] = static_cast<uint8_t>(resized[i]);
}
+
return std::make_unique<TTestCaseData>(label, std::move(quantized));
}
diff --git a/tests/ImagePreprocessor.hpp b/tests/ImagePreprocessor.hpp
index d77113c6d9..cd586818c2 100644
--- a/tests/ImagePreprocessor.hpp
+++ b/tests/ImagePreprocessor.hpp
@@ -33,8 +33,7 @@ public:
unsigned int width,
unsigned int height,
const std::vector<ImageSet>& imageSet,
- float scale=1.0,
- int32_t offset=0,
+ float scale=255.0f,
const std::array<float, 3> mean={{0, 0, 0}},
const std::array<float, 3> stddev={{1, 1, 1}},
DataFormat dataFormat=DataFormat::NHWC,
@@ -44,7 +43,6 @@ public:
, m_Width(width)
, m_BatchSize(batchSize)
, m_Scale(scale)
- , m_Offset(offset)
, m_ImageSet(imageSet)
, m_Mean(mean)
, m_Stddev(stddev)
@@ -66,7 +64,6 @@ private:
unsigned int m_BatchSize;
// Quantization parameters
float m_Scale;
- int32_t m_Offset;
const std::vector<ImageSet> m_ImageSet;
const std::array<float, 3> m_Mean;
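With the offset parameter gone, the constructor takes a single pre-quantization scale (now defaulting to 255.0f) ahead of mean and stddev. A hypothetical usage sketch mirroring the updated test harnesses further down, with dataDir and imageSet assumed to be in scope:

    // Quantized TFLite model: scale 1 keeps resized pixels in the 0-255
    // quantized domain (mean and stddev keep their identity defaults).
    ImagePreprocessor<uint8_t> quantizedDb(dataDir, 224, 224, imageSet, 1);

    // Float model: the default scale of 255.0f divides pixels down to
    // [0, 1] before mean/stddev normalization.
    ImagePreprocessor<float> floatDb(dataDir, 224, 224, imageSet);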
diff --git a/tests/InferenceTestImage.cpp b/tests/InferenceTestImage.cpp
index b011e6ac8f..92c67ae225 100644
--- a/tests/InferenceTestImage.cpp
+++ b/tests/InferenceTestImage.cpp
@@ -55,6 +55,7 @@ inline void PutData(std::vector<float> & data,
std::vector<float> ResizeBilinearAndNormalize(const InferenceTestImage & image,
const unsigned int outputWidth,
const unsigned int outputHeight,
+ const float scale,
const std::array<float, 3>& mean,
const std::array<float, 3>& stddev)
{
@@ -114,7 +115,7 @@ std::vector<float> ResizeBilinearAndNormalize(const InferenceTestImage & image,
const float ly0 = Lerp(float(rgb_x0y0[c]), float(rgb_x1y0[c]), xw);
const float ly1 = Lerp(float(rgb_x0y1[c]), float(rgb_x1y1[c]), xw);
const float l = Lerp(ly0, ly1, yw);
- PutData(out, outputWidth, x, y, c, ((l/255.0f) - mean[c])/stddev[c]);
+ PutData(out, outputWidth, x, y, c, ((l / scale) - mean[c]) / stddev[c]);
}
}
}
@@ -210,7 +211,8 @@ std::vector<float> InferenceTestImage::Resize(unsigned int newWidth,
const armnn::CheckLocation& location,
const ResizingMethods meth,
const std::array<float, 3>& mean,
- const std::array<float, 3>& stddev)
+ const std::array<float, 3>& stddev,
+ const float scale)
{
std::vector<float> out;
if (newWidth == 0 || newHeight == 0)
@@ -227,7 +229,7 @@ std::vector<float> InferenceTestImage::Resize(unsigned int newWidth,
}
case ResizingMethods::BilinearAndNormalized:
{
- out = ResizeBilinearAndNormalize(*this, newWidth, newHeight, mean, stddev);
+ out = ResizeBilinearAndNormalize(*this, newWidth, newHeight, scale, mean, stddev);
break;
}
default:
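With the hard-coded 255.0f divisor replaced by the new scale argument, the per-channel normalization becomes out = ((l / scale) - mean[c]) / stddev[c]. A minimal sketch of that arithmetic, using the ImageNet-style values the OnnxMobileNet test below passes:

    #include <array>
    #include <cstdio>

    int main()
    {
        const float scale = 255.0f;
        const std::array<float, 3> mean   = {{0.485f, 0.456f, 0.406f}};
        const std::array<float, 3> stddev = {{0.229f, 0.224f, 0.225f}};

        const float l = 128.0f; // bilinearly interpolated red-channel value
        const float out = ((l / scale) - mean[0]) / stddev[0];
        std::printf("%f\n", out); // ~0.074 = (0.502 - 0.485) / 0.229
        return 0;
    }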
diff --git a/tests/InferenceTestImage.hpp b/tests/InferenceTestImage.hpp
index 59f4cc7073..643d060919 100644
--- a/tests/InferenceTestImage.hpp
+++ b/tests/InferenceTestImage.hpp
@@ -92,7 +92,8 @@ public:
const armnn::CheckLocation& location,
const ResizingMethods meth = ResizingMethods::STB,
const std::array<float, 3>& mean = {{0.0, 0.0, 0.0}},
- const std::array<float, 3>& stddev = {{1.0, 1.0, 1.0}});
+ const std::array<float, 3>& stddev = {{1.0, 1.0, 1.0}},
+ const float scale = 255.0f);
void Write(WriteFormat format, const char* filePath) const;
diff --git a/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp b/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
index 79ee49e595..8786feacf9 100644
--- a/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
+++ b/tests/OnnxMobileNet-Armnn/OnnxMobileNet-Armnn.cpp
@@ -40,8 +40,7 @@ int main(int argc, char* argv[])
224,
224,
imageSet,
- 1.0, // scale
- 0, // offset
+ 255.0, // scale
{{0.485f, 0.456f, 0.406f}}, // mean
{{0.229f, 0.224f, 0.225f}}, // stddev
DatabaseType::DataFormat::NCHW); // format
diff --git a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
index 4fa0e140f1..bf5a865ac8 100644
--- a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
299,
299,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset());
+ 1);
},
&inputTensorShape);
}
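This test, and the InceptionV4, MobileNet, MobileNetV2, ResNetV2-50 and VGG16 quantized tests below, all switch from model-derived quantization parameters to a fixed scale of 1. With scale 1 and the default mean/stddev, normalization is the identity, so the static_cast in ImagePreprocessor.cpp writes the raw 0-255 pixel values straight into the uint8 input tensor. A minimal sketch of why the cast is safe under those defaults:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const float scale = 1.0f, mean = 0.0f, stddev = 1.0f;
        const float l = 200.0f;                          // interpolated pixel value
        const float normalized = ((l / scale) - mean) / stddev;
        assert(normalized == l);                         // identity mapping
        assert(static_cast<uint8_t>(normalized) == 200); // cast preserves the pixel
        return 0;
    }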
diff --git a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
index bcb9db8a62..b0af830ba7 100644
--- a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
299,
299,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset());
+ 1);
},
&inputTensorShape);
}
diff --git a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
index c676cd7355..4cf16d78cc 100644
--- a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
+++ b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
@@ -42,7 +42,9 @@ int main(int argc, char* argv[])
dataDir,
224,
224,
- imageSet);
+ imageSet,
+ 127.5f,
+ {0.5f,0.5f,0.5f});
},
&inputTensorShape);
}
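The float TFLite models (MnasNet here, ResNetV2 below) instead pass a scale of 127.5f and a per-channel mean of 0.5, with stddev left at its default of 1, so a pixel p maps to p/127.5 - 0.5. A small sketch of the resulting range under those parameters:

    #include <cstdio>

    int main()
    {
        const float scale = 127.5f, mean = 0.5f, stddev = 1.0f;
        const float pixels[] = {0.0f, 127.5f, 255.0f};
        for (float p : pixels)
        {
            std::printf("%g -> %g\n", p, ((p / scale) - mean) / stddev);
        }
        // Prints: 0 -> -0.5, 127.5 -> 0.5, 255 -> 1.5
        return 0;
    }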
diff --git a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
index 4d99e9ec86..70828498df 100644
--- a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
+++ b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
128,
128,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset(),
+ 1,
{{0, 0, 0}},
{{1, 1, 1}},
DatabaseType::DataFormat::NCHW,
diff --git a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
index 220964d061..1b411f9b94 100644
--- a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
@@ -108,14 +108,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
224,
224,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset());
+ 1);
},
&inputTensorShape);
}
diff --git a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
index 5db5c243b1..9bc1034fea 100644
--- a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
224,
224,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset());
+ 1);
},
&inputTensorShape);
}
diff --git a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
index 48e63211a3..98235e3f8a 100644
--- a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
+++ b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
@@ -40,14 +40,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
224,
224,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset());
+ 1);
},
&inputTensorShape);
}
diff --git a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
index 36fc72cdf4..1e2ffbf568 100644
--- a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
+++ b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
@@ -28,7 +28,6 @@ int main(int argc, char* argv[])
using DatabaseType = ImagePreprocessor<DataType>;
using ParserType = armnnTfLiteParser::ITfLiteParser;
using ModelType = InferenceModel<ParserType, DataType>;
-
// Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
ParserType>(
@@ -43,7 +42,9 @@ int main(int argc, char* argv[])
dataDir,
299,
299,
- imageSet);
+ imageSet,
+ 127.5f,
+ {0.5f,0.5f,0.5f});
},
&inputTensorShape);
}
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index 0ba1e5dc00..2084d2d656 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -42,14 +42,12 @@ int main(int argc, char* argv[])
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
- auto inputBinding = model.GetInputBindingInfo();
return DatabaseType(
dataDir,
224,
224,
imageSet,
- inputBinding.second.GetQuantizationScale(),
- inputBinding.second.GetQuantizationOffset(),
+ 1,
{{0, 0, 0}},
{{1, 1, 1}},
DatabaseType::DataFormat::NCHW,