From b2845655b7814470c2a52cd7d0bee01031615bfc Mon Sep 17 00:00:00 2001
From: Ferran Balaguer
Date: Wed, 27 Feb 2019 09:42:06 +0000
Subject: IVGCVSW-2632 Fix RefMerger from QAsymm8 types with different
 quantization parameters

Change-Id: Ie67ce4966c5e5fef618876b027292da429de1485
Signed-off-by: Ferran Balaguer
---
 src/armnn/NetworkQuantizerUtils.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/armnn/NetworkQuantizerUtils.cpp b/src/armnn/NetworkQuantizerUtils.cpp
index 1bec63b58c..551760f362 100644
--- a/src/armnn/NetworkQuantizerUtils.cpp
+++ b/src/armnn/NetworkQuantizerUtils.cpp
@@ -12,7 +12,7 @@
 namespace armnn
 {
 
-std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max)
+std::pair<float, int> ComputeQAsymmParams(int numBits, double min, double max)
 {
     BOOST_ASSERT_MSG(min < max, "min >= max will result in invalid quantization.");
     double highest = (1 << numBits) - 1;
@@ -27,7 +27,7 @@ std::pair<int, float> ComputeQAsymmParams(int numBits, double min, double max)
     // Clamp offset [0-highest]
     offset = std::max(0.0, std::min(highest, offset));
 
-    return std::make_pair(static_cast<int>(std::round(offset)), static_cast<float>(scale));
+    return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
 }
 
 ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing)
-- 
cgit v1.2.1
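
For context, the patched function computes asymmetric quantization parameters: a scale that maps the [min, max] float range onto the [0, 2^numBits - 1] integer range, and a rounded, clamped offset (zero point). The patch only reorders the returned pair from (offset, scale) to (scale, offset). Below is a minimal standalone sketch, not taken from the Arm NN sources, showing how such a (scale, offset) pair is derived and then applied to quantize and dequantize a value. The function and helper names are illustrative only, and the interior of the computation (between the two hunks, which the patch does not show) follows the standard asymmetric scheme rather than the exact Arm NN code.

    // Standalone illustration only; mirrors the shape of the patched function.
    // ComputeQAsymmParamsSketch, QuantizeValue and DequantizeValue are
    // hypothetical names, not Arm NN APIs.
    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <utility>

    // Returns {scale, offset}, matching the post-patch ordering of the pair.
    std::pair<float, int> ComputeQAsymmParamsSketch(int numBits, double min, double max)
    {
        assert(min < max && "min >= max will result in invalid quantization.");
        double highest = (1 << numBits) - 1;   // 255 for 8 bits

        // Make sure the range straddles zero so 0.0f maps to an exact integer
        // (assumed behaviour; this part is not shown in the patch).
        min = std::min(0.0, min);
        max = std::max(0.0, max);

        double scale  = (max - min) / highest; // real value covered by one step
        double offset = -min / scale;          // zero point before clamping

        // Clamp offset to [0, highest], as in the patched code.
        offset = std::max(0.0, std::min(highest, offset));

        return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
    }

    // Apply the parameters: q = round(x / scale) + offset, clamped to 8 bits.
    uint8_t QuantizeValue(float value, float scale, int offset)
    {
        int q = static_cast<int>(std::round(value / scale)) + offset;
        return static_cast<uint8_t>(std::max(0, std::min(255, q)));
    }

    // Invert them: x = (q - offset) * scale.
    float DequantizeValue(uint8_t q, float scale, int offset)
    {
        return (static_cast<int>(q) - offset) * scale;
    }

    int main()
    {
        auto params  = ComputeQAsymmParamsSketch(8, -1.0, 2.0);
        float scale  = params.first;
        int   offset = params.second;

        uint8_t q = QuantizeValue(0.5f, scale, offset);
        std::cout << "scale=" << scale << " offset=" << offset
                  << " q(0.5)=" << static_cast<int>(q)
                  << " dequantized=" << DequantizeValue(q, scale, offset) << "\n";
    }

With the post-patch ordering, callers read params.first as the scale and params.second as the offset; under the old ordering those two accesses were swapped, which is why consumers that assumed (scale, offset) saw incorrect quantization parameters.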