author    Teresa Charlin <teresa.charlinreyes@arm.com>    2023-04-03 19:57:00 +0100
committer Colm Donelan <colm.donelan@arm.com>    2023-04-18 17:27:41 +0000
commit    acb3ec51e51542d3011ed87842f87c2261abaaff (patch)
tree      b1ed73756c1db4a8e71b18a5a8256f42bb49341b /src/armnn
parent    8294e96a2f0f4ad3f5cd261079a6f90eee40142c (diff)
download  armnn-acb3ec51e51542d3011ed87842f87c2261abaaff.tar.gz
GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.
* Arm NN does not account for non-quantized int8 or uint8 types, whereas TensorFlow does. A non-quantized int8/uint8 tensor is equivalent to a quantized int8/uint8 tensor with scale = 1.0 and offset = 0. The default offset/zero_point was already 0; this review sets the default scale to 1.0.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
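As a brief illustration of why 1.0 is the sensible default scale for non-quantized int8/uint8 data, here is a minimal sketch of the affine dequantization relation the commit message relies on; it is not code from this patch, and the Dequantize helper name is hypothetical:

#include <cstdint>

// Affine dequantization used for asymmetric quantized tensors:
//   real = scale * (quantized - zero_point)
// With a default scale of 0.0 every stored value dequantizes to 0.0;
// with scale = 1.0 and zero_point = 0 the stored values pass through
// unchanged, which matches treating plain (non-quantized) int8/uint8
// data as already being in the real domain.
float Dequantize(uint8_t quantized, float scale, int32_t zeroPoint)
{
    return scale * (static_cast<float>(quantized) - static_cast<float>(zeroPoint));
}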
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/test/RuntimeTests.cpp | 4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 67684448bb..41a3fe1b64 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -533,7 +533,9 @@ TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
                                                               0));
     softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
-                                                              armnn::DataType::QAsymmU8));
+                                                              armnn::DataType::QAsymmU8,
+                                                              0.0f,
+                                                              0));
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     std::vector<std::string> errMessages;
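For context, a hedged sketch of what the new default means when the quantization parameters are left unspecified; the asserted values reflect the intent stated in the commit message rather than an excerpt from the Arm NN test suite, and only the TensorInfo constructor form is taken from the diff above:

#include <armnn/Tensor.hpp>
#include <cassert>

int main()
{
    // After this change, omitting the scale/offset arguments is expected
    // to yield scale = 1.0 and offset = 0 instead of scale = 0.0.
    armnn::TensorInfo info(armnn::TensorShape({ 1, 5 }), armnn::DataType::QAsymmU8);
    assert(info.GetQuantizationScale() == 1.0f);
    assert(info.GetQuantizationOffset() == 0);
    return 0;
}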