From b2845655b7814470c2a52cd7d0bee01031615bfc Mon Sep 17 00:00:00 2001
From: Ferran Balaguer
Date: Wed, 27 Feb 2019 09:42:06 +0000
Subject: IVGCVSW-2632 Fix RefMerger from QAsymm8 types with different quantization parameters

Change-Id: Ie67ce4966c5e5fef618876b027292da429de1485
Signed-off-by: Ferran Balaguer
---
 src/armnn/test/QuantizerTest.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'src/armnn/test')

diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index fcce208c59..f7723bd0c0 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -997,15 +997,15 @@ BOOST_AUTO_TEST_CASE(QuantizeMerger)
                           const OriginsDescriptor& mergerDescriptor,
                           const char* name = nullptr)
         {
-            std::pair<int, float> expectedValues = ComputeQAsymmParams(8, m_Min, m_Max);
+            std::pair<float, int> expectedValues = ComputeQAsymmParams(8, m_Min, m_Max);
 
             TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
 
             BOOST_TEST((info.GetDataType() == DataType::QuantisedAsymm8));
 
-            BOOST_TEST((info.GetQuantizationOffset() == expectedValues.first));
+            BOOST_TEST((info.GetQuantizationOffset() == expectedValues.second));
 
-            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), expectedValues.second, 0.000001f);
+            BOOST_CHECK_CLOSE(info.GetQuantizationScale(), expectedValues.first, 0.000001f);
         }
 
     private:
--
cgit v1.2.1
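
For context, the test above now reads the expected quantization parameters in (scale, offset) order: scale via expectedValues.first and offset via expectedValues.second. Below is a minimal, self-contained sketch of how 8-bit asymmetric quantization parameters are conventionally derived from a float range. ComputeQAsymm8Params is a hypothetical stand-in written for illustration, not ArmNN's actual ComputeQAsymmParams helper, and the numeric range in main() is made up.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <utility>

// Hypothetical stand-in for ArmNN's ComputeQAsymmParams: derives (scale, offset)
// for 8-bit asymmetric quantization from the float range [min, max]. Assumes the
// conventional formulation; the real helper may differ in detail.
std::pair<float, int> ComputeQAsymm8Params(float min, float max)
{
    // Extend the range to include zero so that 0.0f is exactly representable.
    min = std::min(min, 0.0f);
    max = std::max(max, 0.0f);

    const int numSteps = 255;                     // 2^8 - 1 quantization steps
    float scale = (max - min) / numSteps;         // real-valued width of one step
    if (scale == 0.0f)
    {
        scale = 1.0f;                             // degenerate range, avoid div-by-zero
    }

    // Offset (zero point) is the quantized integer that maps back to 0.0f.
    int offset = static_cast<int>(std::round(-min / scale));
    offset = std::max(0, std::min(255, offset));  // clamp into the uint8 range

    return { scale, offset };                     // (scale, offset) order, as in the test
}

int main()
{
    // Illustrative range only.
    const auto [scale, offset] = ComputeQAsymm8Params(-1.0f, 1.5f);
    std::cout << "scale=" << scale << " offset=" << offset << "\n";

    // Round-trip a value to show how scale and offset are applied.
    const float real        = 0.5f;
    const int   quantized   = static_cast<int>(std::round(real / scale)) + offset;
    const float dequantized = scale * static_cast<float>(quantized - offset);
    std::cout << real << " -> " << quantized << " -> " << dequantized << "\n";
    return 0;
}
```

Keeping the pair in (scale, offset) order, and asserting each member against the matching TensorInfo accessor, is exactly the mismatch the patch corrects: previously the offset was compared against the scale slot of the pair and vice versa.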