From acb3ec51e51542d3011ed87842f87c2261abaaff Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Mon, 3 Apr 2023 19:57:00 +0100
Subject: GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.

* Arm NN does not account for non-quantized int8 or uint8 types, whereas
  TensorFlow does. Non-quantized int8 and uint8 are the same as quantized
  int8 and uint8 with scale = 1.0 and offset = 0. The default
  offset/zero_point was already 0; this review sets the default scale to 1.0.

Signed-off-by: Teresa Charlin
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
---
 src/backends/neon/test/NeonCreateWorkloadTests.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 66718cc481..96429a84e1 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
@@ -357,8 +357,8 @@ static void NeonCreateFullyConnectedWorkloadTest()
     auto outputHandle = PolymorphicDowncast(queueDescriptor.m_Outputs[0]);

     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
-    float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
-    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+    float inputsQScale = 1.0f;
+    float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
     CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
     CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
 }
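
For reference, the quantization arithmetic the commit message relies on can be sketched as below. This is an illustrative, hypothetical helper (Quantize and Dequantize here are not Arm NN API calls); it only demonstrates that affine quantization with scale = 1.0 and zero_point = 0 is the identity mapping, and that the old default scale of 0.0 would be ill-defined.

    // Illustrative sketch only (not part of the patch, and not Arm NN API):
    // affine quantization is q = round(r / scale) + zeroPoint, so with
    // scale = 1.0 and zeroPoint = 0 the mapping is the identity, which is why
    // treating non-quantized int8/uint8 as quantized with those defaults is safe.
    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int8_t Quantize(float real, float scale, int32_t zeroPoint)
    {
        // Clamping to the int8 range is omitted for brevity.
        return static_cast<int8_t>(std::lround(real / scale) + zeroPoint);
    }

    float Dequantize(int8_t quantized, float scale, int32_t zeroPoint)
    {
        return static_cast<float>(quantized - zeroPoint) * scale;
    }

    int main()
    {
        // With the new defaults (scale = 1.0, zeroPoint = 0), values round-trip unchanged.
        assert(Quantize(5.0f, 1.0f, 0) == 5);
        assert(Dequantize(5, 1.0f, 0) == 5.0f);

        // A scale of 0.0 would make the division in Quantize ill-defined,
        // which is the motivation for changing the default to 1.0.
        return 0;
    }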