author     Teresa Charlin <teresa.charlinreyes@arm.com>    2023-04-03 19:57:00 +0100
committer  Colm Donelan <colm.donelan@arm.com>             2023-04-18 17:27:41 +0000
commit     acb3ec51e51542d3011ed87842f87c2261abaaff (patch)
tree       b1ed73756c1db4a8e71b18a5a8256f42bb49341b /src/armnnTfLiteParser
parent     8294e96a2f0f4ad3f5cd261079a6f90eee40142c (diff)
download   armnn-acb3ec51e51542d3011ed87842f87c2261abaaff.tar.gz
GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.
* Arm NN does not account for non-quantized int8 or uint8 types, whereas TensorFlow does.
  A non-quantized int8/uint8 tensor is equivalent to a quantized int8/uint8 tensor with
  scale = 1.0 and offset = 0. The default offset/zero_point was already 0; this change
  sets the default scale to 1.0.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
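The reasoning can be sketched with the affine dequantization formula real = scale * (quantized - zero_point): with scale = 1.0 and zero_point = 0 the mapping is the identity, whereas the old default scale of 0.0 would collapse every dequantized value to zero. A minimal standalone C++ sketch (illustration only, not part of the patch):

#include <cstdint>
#include <iostream>

// Affine dequantization as applied to int8/uint8 tensors:
// real = scale * (quantized - zero_point)
static float Dequantize(int8_t quantized, float scale, int32_t zeroPoint)
{
    return scale * static_cast<float>(static_cast<int32_t>(quantized) - zeroPoint);
}

int main()
{
    const int8_t q = 42;
    std::cout << Dequantize(q, 0.0f, 0) << "\n"; // old default: always 0, values are lost
    std::cout << Dequantize(q, 1.0f, 0) << "\n"; // new default: identity, matching TensorFlow's
                                                 // handling of non-quantized int8/uint8
    return 0;
}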
Diffstat (limited to 'src/armnnTfLiteParser')
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp               2
-rw-r--r--  src/armnnTfLiteParser/test/DetectionPostProcess.cpp  10
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index c787212359..ee4cadd216 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -524,7 +524,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
}
}
- float quantizationScale = 0.0f;
+ float quantizationScale = 1.0f;
int32_t quantizationOffset = 0;
if (tensorPtr->quantization.get())
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 2f9f29c223..9d807d8c5d 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -288,10 +288,10 @@ TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions, "DetectionPostProcessG
armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QAsymmU8,
0.00999999978f, 0);
- armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32, 0, 0);
- armnn::TensorInfo detectionClassesTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
- armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
- armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1} ), armnn::DataType::Float32, 0, 0);
+ armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32);
+ armnn::TensorInfo detectionClassesTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32);
+ armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32);
+ armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1 } ), armnn::DataType::Float32);
CHECK(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
CHECK(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
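For reference, a hedged sketch of what the relaxed test declarations above rely on, assuming the public Arm NN header armnn/Tensor.hpp: Float32 outputs need no explicit quantization arguments, so the removed (0, 0) parameters were redundant.

#include <armnn/Tensor.hpp>
#include <iostream>

int main()
{
    // Same construction as the updated test: no explicit scale/offset for Float32.
    armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }),
                                           armnn::DataType::Float32);

    std::cout << detectionBoxesTensor.GetNumElements() << "\n";                // 12
    std::cout << std::boolalpha << detectionBoxesTensor.IsQuantized() << "\n"; // false for Float32
    return 0;
}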