From acb3ec51e51542d3011ed87842f87c2261abaaff Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Mon, 3 Apr 2023 19:57:00 +0100
Subject: GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.

* Arm NN does not have non-quantized int8 or uint8 data types, whereas
  TensorFlow does. A non-quantized int8 or uint8 tensor is equivalent to a
  quantized one with scale = 1.0 and offset = 0. The default
  offset/zero_point was already 0; this review sets the default scale to 1.0.

Signed-off-by: Teresa Charlin
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
---
 src/armnn/test/RuntimeTests.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 67684448bb..41a3fe1b64 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -533,7 +533,9 @@ TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
                                                               0));
 
     softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
-                                                              armnn::DataType::QAsymmU8));
+                                                              armnn::DataType::QAsymmU8,
+                                                              0.0f,
+                                                              0));
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     std::vector<std::string> errMessages;
--
cgit v1.2.1
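
Editor's note, not part of the patch: the commit message argues that non-quantized int8/uint8 data is equivalent to quantized data with scale = 1.0 and offset (zero_point) = 0. A minimal standalone C++ sketch of that identity is given below, using the standard affine dequantization formula real = scale * (q - offset); the Dequantize helper is hypothetical and written here only for illustration, it is not an Arm NN API.

// Illustrative sketch only: with scale = 1.0 and offset = 0 the dequantized
// value equals the raw integer, so "not quantized" uint8 behaves exactly like
// this quantized encoding. A scale of 0.0 would collapse every value to 0,
// which is why Arm NN treats it as invalid (the IVGCVSW_1929 test above
// exercises that error path).
#include <cstdint>
#include <initializer_list>
#include <iostream>

// Hypothetical helper: standard affine dequantization, real = scale * (q - offset).
float Dequantize(uint8_t q, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
}

int main()
{
    const float   scale  = 1.0f; // the new default proposed by this change
    const int32_t offset = 0;    // default zero_point, unchanged

    for (uint8_t q : { uint8_t(0), uint8_t(5), uint8_t(255) })
    {
        // Prints q -> q, demonstrating the identity for the new defaults.
        std::cout << static_cast<int>(q) << " -> "
                  << Dequantize(q, scale, offset) << "\n";
    }
    return 0;
}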