From 0d0a78ebd60e058746ea161914b45709245d4e1b Mon Sep 17 00:00:00 2001
From: Francis Murtagh
Date: Fri, 22 Feb 2019 16:35:13 +0000
Subject: IVGCVSW-2721 Quantize and Dequantize aren't quite right

* Add check for infinity and negative infinity in quantize()
* Add assert for NaN value in quantize and dequantize()
* Add unit tests for infinity and negative infinity

Change-Id: Ie60e1e15b289ccbf99df4a3281f067b82cc9f9bf
Signed-off-by: Francis Murtagh
---
 include/armnn/TypesUtils.hpp     | 15 +++++++--------
 src/armnn/test/QuantizerTest.cpp | 29 +++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 8 deletions(-)

diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index c65eefc510..e7652649b0 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -189,18 +189,16 @@ inline std::ostream & operator<<(std::ostream & os, const armnn::TensorShape & s
 template<typename QuantizedType>
 inline QuantizedType Quantize(float value, float scale, int32_t offset)
 {
-    // TODO : check we act sensibly for Inf, NaN and -Inf
-    //        see IVGCVSW-1849
     static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
     constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
     constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
     BOOST_ASSERT(scale != 0.f);
-    int quantized = boost::numeric_cast<int>(round(value / scale)) + offset;
-    QuantizedType quantizedBits = quantized <= min
-                                  ? min
-                                  : quantized >= max
-                                  ? max
-                                  : static_cast<QuantizedType>(quantized);
+    BOOST_ASSERT(!std::isnan(value));
+
+    float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
+                                  static_cast<float>(max));
+    auto quantizedBits = static_cast<QuantizedType>(clampedValue);
+
     return quantizedBits;
 }
 
@@ -215,6 +213,7 @@ inline float Dequantize(QuantizedType value, float scale, int32_t offset)
 {
     static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
     BOOST_ASSERT(scale != 0.f);
+    BOOST_ASSERT(!std::isnan(value));
     float dequantized = boost::numeric_cast<float>(value - offset) * scale;
     return dequantized;
 }
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 548203a6a9..fcce208c59 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -14,6 +14,7 @@
 #include "../NetworkQuantizerUtils.hpp"
 #include "../OverrideInputRangeVisitor.hpp"
 #include "../RangeTracker.hpp"
+#include "../backends/backendsCommon/test/QuantizeHelper.hpp"
 
 #include <boost/test/unit_test.hpp>
 
@@ -1215,5 +1216,33 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
     VisitLayersTopologically(quantizedNetwork.get(), validator);
 }
 
+std::vector<uint8_t> SetupQuantize(float value)
+{
+    armnn::TensorInfo inputInfo({ 1, 2, 2 }, armnn::DataType::Float32);
+    inputInfo.SetQuantizationScale(1.0f);
+    inputInfo.SetQuantizationOffset(1);
+    std::vector<float> input({
+        value, 0.0f,
+        0.0f, 1.0f
+    });
+    const std::vector<float> &inputRef = input;
+
+    auto output = QuantizedVector<uint8_t>(inputInfo.GetQuantizationScale(),
+                                           inputInfo.GetQuantizationOffset(),
+                                           inputRef);
+
+    return output;
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeInf)
+{
+    BOOST_CHECK_EQUAL(SetupQuantize(std::numeric_limits<float>::infinity())[0], 255);
+}
+
+BOOST_AUTO_TEST_CASE(QuantizeNegativeInf)
+{
+    BOOST_CHECK_EQUAL(SetupQuantize(-1 * std::numeric_limits<float>::infinity())[0], 0);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
 } // namespace armnn
--
cgit v1.2.1
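
For reference, the standalone sketch below illustrates the clamping behaviour this patch introduces in Quantize(): the value is clamped in floating point before the final integer cast, so +infinity saturates to the type's maximum (255 for uint8_t) and -infinity to its minimum (0), matching the new QuantizeInf and QuantizeNegativeInf tests. This is a minimal illustration of the same idea, not the ArmNN implementation; QuantizeSketch and the main() driver are illustrative names that do not exist in the library.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <type_traits>

// Hypothetical stand-in for armnn::Quantize, mirroring only the clamping idea
// from the patch: clamp in float space so out-of-range and infinite inputs
// saturate instead of overflowing the integer cast.
template <typename QuantizedType>
QuantizedType QuantizeSketch(float value, float scale, int32_t offset)
{
    static_assert(std::is_integral<QuantizedType>::value, "Not an integer type.");
    constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
    constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
    assert(scale != 0.f);       // a zero scale would divide by zero
    assert(!std::isnan(value)); // NaN cannot be mapped to a meaningful quantized value

    float clamped = std::min(std::max(std::round(value / scale) + static_cast<float>(offset),
                                      static_cast<float>(min)),
                             static_cast<float>(max));
    return static_cast<QuantizedType>(clamped);
}

int main()
{
    const float   scale  = 1.0f; // same scale as SetupQuantize in the new tests
    const int32_t offset = 1;    // same offset as SetupQuantize in the new tests

    // +infinity saturates to 255 and -infinity to 0 for uint8_t, which is
    // exactly what the QuantizeInf / QuantizeNegativeInf tests check.
    std::cout << +QuantizeSketch<uint8_t>( std::numeric_limits<float>::infinity(), scale, offset) << "\n"; // 255
    std::cout << +QuantizeSketch<uint8_t>(-std::numeric_limits<float>::infinity(), scale, offset) << "\n"; // 0
    std::cout << +QuantizeSketch<uint8_t>(0.0f, scale, offset) << "\n";                                    // 1
    return 0;
}

Clamping in floating point before the cast matters because casting an out-of-range or infinite float directly to an integer type is undefined behaviour; the previous integer-based comparison could not guard against that for infinite inputs.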