diff options
author | Teresa Charlin <teresa.charlinreyes@arm.com> | 2023-04-03 19:57:00 +0100 |
---|---|---|
committer | Colm Donelan <colm.donelan@arm.com> | 2023-04-18 17:27:41 +0000 |
commit | acb3ec51e51542d3011ed87842f87c2261abaaff (patch) | |
tree | b1ed73756c1db4a8e71b18a5a8256f42bb49341b /src/backends/reference/test/RefCreateWorkloadTests.cpp | |
parent | 8294e96a2f0f4ad3f5cd261079a6f90eee40142c (diff) | |
download | armnn-acb3ec51e51542d3011ed87842f87c2261abaaff.tar.gz |
GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.
* Arm NN does not account for int8 or uint8 not quantized types, Tensorflow does.
Not quantized int8 and uint8 is the same as quantized int8 and uint8 with scale = 1.0 and offset = 0.
Default offset/zero_point was already 0, this review sets the default scale to 1.0.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
Diffstat (limited to 'src/backends/reference/test/RefCreateWorkloadTests.cpp')
-rw-r--r-- | src/backends/reference/test/RefCreateWorkloadTests.cpp | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp index c46a9e5bac..894dd75ef2 100644 --- a/src/backends/reference/test/RefCreateWorkloadTests.cpp +++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp @@ -1,5 +1,5 @@ // -// Copyright © 2017 Arm Ltd and Contributors. All rights reserved. +// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved. // SPDX-License-Identifier: MIT // @@ -479,8 +479,8 @@ TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest") armnn::DataType::Float32>(factory, graph); // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). - float inputsQScale = 0.0f; - float outputQScale = 0.0f; + float inputsQScale = 1.0f; + float outputQScale = 1.0f; CheckInputOutput(std::move(workload), TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale), TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale)); @@ -496,8 +496,8 @@ TEST_CASE("CreateFullyConnectedWorkloadWeightsBiasesAsInputsFloat32") armnn::DataType::Float32>(factory, graph); // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). - float inputsQScale = 0.0f; - float outputQScale = 0.0f; + float inputsQScale = 1.0f; + float outputQScale = 1.0f; CheckInputsOutput(std::move(workload), TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale), TensorInfo({ 7, 20 }, armnn::DataType::Float32, inputsQScale), @@ -512,8 +512,8 @@ static void RefCreateFullyConnectedWorkloadTest() auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph); // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest). - float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0; - float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0; + float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 1.0f; + float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0f; CheckInputOutput(std::move(workload), TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale), TensorInfo({ 3, 7 }, DataType, outputQScale));