author    Teresa Charlin <teresa.charlinreyes@arm.com>    2023-04-03 19:57:00 +0100
committer Colm Donelan <colm.donelan@arm.com>    2023-04-18 17:27:41 +0000
commit    acb3ec51e51542d3011ed87842f87c2261abaaff (patch)
tree      b1ed73756c1db4a8e71b18a5a8256f42bb49341b /src/armnnSerializer/test
parent    8294e96a2f0f4ad3f5cd261079a6f90eee40142c (diff)
download  armnn-acb3ec51e51542d3011ed87842f87c2261abaaff.tar.gz
GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.
* Arm NN does not account for non-quantized int8 or uint8 types, whereas TensorFlow does. A non-quantized int8/uint8 tensor is equivalent to a quantized int8/uint8 tensor with scale = 1.0 and offset = 0. The default offset/zero_point was already 0; this change sets the default scale to 1.0.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
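To illustrate the behavioural point, here is a minimal sketch (not part of the commit) using the public armnn::TensorInfo constructor, which takes the quantization scale and offset after the shape and data type; the shape and QAsymmS8 data type below are made up for the example:

    #include <armnn/Tensor.hpp>
    #include <iostream>

    int main()
    {
        // Relying on the constructor defaults: with this change, a tensor
        // created without explicit quantization parameters gets scale = 1.0
        // and offset = 0, i.e. it behaves like a non-quantized int8 tensor.
        armnn::TensorInfo defaulted({ 1, 2, 2, 1 }, armnn::DataType::QAsymmS8);

        // Passing scale/offset explicitly pins the values regardless of
        // what the library default is.
        armnn::TensorInfo pinned({ 1, 2, 2, 1 }, armnn::DataType::QAsymmS8, 0.0f, 0);

        std::cout << defaulted.GetQuantizationScale() << "\n"; // 1.0 after this patch
        std::cout << pinned.GetQuantizationScale() << "\n";    // 0.0, as pinned
        return 0;
    }

The test changes below follow the second pattern: the legacy serialized models being deserialized still carry scale 0.0, so the expected TensorInfo must now state 0.0f explicitly rather than rely on the changed default.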
Diffstat (limited to 'src/armnnSerializer/test')
-rw-r--r--  src/armnnSerializer/test/LstmSerializationTests.cpp | 10
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp        | 14
2 files changed, 12 insertions, 12 deletions
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index ae2d813fc0..ff96a4bb85 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1318,10 +1318,10 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
params.m_CellToOutputWeights = &cellToOutputWeights;
const std::string layerName("lstm");
- armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
- armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
- armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
- armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+ armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32, 0.0f , 0);
+ armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32, 0.0f , 0);
+ armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32, 0.0f , 0);
+ armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32, 0.0f , 0);
VerifyLstmLayer<armnn::LstmDescriptor> checker(
layerName,
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3998ee730d..90d778991b 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1553,7 +1553,7 @@ TEST_CASE("EnsureL2NormalizationBackwardCompatibility")
CHECK(deserializedNetwork);
const std::string layerName("l2Normalization");
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 2, 1, 5}, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 2, 1, 5}, armnn::DataType::Float32, 0.0f, 0);
armnn::L2NormalizationDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NCHW;
@@ -1805,8 +1805,8 @@ TEST_CASE("EnsureMergerLayerBackwardCompatibility")
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(mergerModel.begin(), mergerModel.end()));
CHECK(deserializedNetwork);
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 4, 3, 2, 2 }, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32, 0.0f, 0);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 4, 3, 2, 2 }, armnn::DataType::Float32, 0.0f, 0);
const std::vector<armnn::TensorShape> shapes({inputInfo.GetShape(), inputInfo.GetShape()});
@@ -2071,8 +2071,8 @@ TEST_CASE("EnsurePadBackwardCompatibility")
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(padModel.begin(), padModel.end()));
CHECK(deserializedNetwork);
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 1, 3, 5, 7 }, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32, 0.0f, 0);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 1, 3, 5, 7 }, armnn::DataType::Float32, 0.0f, 0);
armnn::PadDescriptor descriptor({{ 0, 0 }, { 1, 0 }, { 1, 1 }, { 1, 2 }});
@@ -2441,8 +2441,8 @@ TEST_CASE("EnsureResizeBilinearBackwardCompatibility")
DeserializeNetwork(std::string(resizeBilinearModel.begin(), resizeBilinearModel.end()));
CHECK(deserializedNetwork);
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32, 0.0f, 0);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32, 0.0f, 0);
armnn::ResizeDescriptor descriptor;
descriptor.m_TargetWidth = 4u;