author     Teresa Charlin <teresa.charlinreyes@arm.com>  2023-04-03 19:57:00 +0100
committer  Colm Donelan <colm.donelan@arm.com>  2023-04-18 17:27:41 +0000
commit     acb3ec51e51542d3011ed87842f87c2261abaaff (patch)
tree       b1ed73756c1db4a8e71b18a5a8256f42bb49341b
parent     8294e96a2f0f4ad3f5cd261079a6f90eee40142c (diff)
download   armnn-acb3ec51e51542d3011ed87842f87c2261abaaff.tar.gz
GitHub #719 Set quantization parameter scale to 1.0, instead of 0.0.
* Arm NN does not account for non-quantized int8 or uint8 types, whereas TensorFlow does. A non-quantized int8/uint8 tensor is equivalent to a quantized int8/uint8 tensor with scale = 1.0 and offset = 0. The default offset/zero_point was already 0; this change sets the default scale to 1.0 as well.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ibc3eecc281de516c2cc706e17bde01c64ff9556e
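To see why scale = 1.0 and offset = 0 make quantization a no-op, consider the standard affine dequantization formula, real = scale * (quantized - offset). The following standalone C++ sketch illustrates the identity; the Dequantize helper below is illustrative and not part of the Arm NN API:

    #include <cstdint>
    #include <iostream>

    // Affine dequantization: real = scale * (quantized - offset).
    // With scale = 1.0 and offset = 0 this is the identity mapping,
    // which is why these are the natural defaults for "not quantized"
    // int8/uint8 tensors.
    float Dequantize(int8_t quantized, float scale, int32_t offset)
    {
        return scale * static_cast<float>(quantized - offset);
    }

    int main()
    {
        std::cout << Dequantize(42, 1.0f, 0) << "\n"; // prints 42 (identity)
        std::cout << Dequantize(42, 0.5f, 2) << "\n"; // prints 20 (real quantization)
    }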
-rw-r--r--  include/armnn/Tensor.hpp  6
-rw-r--r--  src/armnn/test/RuntimeTests.cpp  4
-rw-r--r--  src/armnnDeserializer/test/DeserializeFill.cpp  4
-rw-r--r--  src/armnnSerializer/test/LstmSerializationTests.cpp  10
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp  14
-rw-r--r--  src/armnnTestUtils/CreateWorkload.hpp  32
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp  2
-rw-r--r--  src/armnnTfLiteParser/test/DetectionPostProcess.cpp  10
-rw-r--r--  src/backends/backendsCommon/test/WorkloadDataValidation.cpp  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp  6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp  18
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp  4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp  8
-rw-r--r--  src/backends/neon/test/NeonCreateWorkloadTests.cpp  6
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp  14
17 files changed, 76 insertions, 74 deletions
diff --git a/include/armnn/Tensor.hpp b/include/armnn/Tensor.hpp
index 46859c4887..1bbc19f2f1 100644
--- a/include/armnn/Tensor.hpp
+++ b/include/armnn/Tensor.hpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
@@ -157,14 +157,14 @@ public:
TensorInfo(const TensorShape& shape,
DataType dataType,
- float quantizationScale = 0.0f,
+ float quantizationScale = 1.0f,
int32_t quantizationOffset = 0,
bool isConstant = false);
TensorInfo(unsigned int numDimensions,
const unsigned int* dimensionSizes,
DataType dataType,
- float quantizationScale = 0.0f,
+ float quantizationScale = 1.0f,
int32_t quantizationOffset = 0,
bool isConstant = false);
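With the new defaults above, a TensorInfo constructed without explicit quantization parameters now reports scale 1.0 and offset 0. A minimal usage sketch, assuming the public armnn::TensorInfo accessors GetQuantizationScale() and GetQuantizationOffset() (not shown in this hunk):

    #include <armnn/Tensor.hpp>
    #include <cassert>

    int main()
    {
        // No quantization parameters given: the defaults from this patch apply.
        armnn::TensorInfo info(armnn::TensorShape({ 1, 8 }), armnn::DataType::QAsymmS8);
        assert(info.GetQuantizationScale()  == 1.0f); // was 0.0f before this change
        assert(info.GetQuantizationOffset() == 0);
    }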
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 67684448bb..41a3fe1b64 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -533,7 +533,9 @@ TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
0));
softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
- armnn::DataType::QAsymmU8));
+ armnn::DataType::QAsymmU8,
+ 0.0f,
+ 0));
std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
std::vector<std::string> errMessages;
diff --git a/src/armnnDeserializer/test/DeserializeFill.cpp b/src/armnnDeserializer/test/DeserializeFill.cpp
index 2a961b42cc..56ff9828be 100644
--- a/src/armnnDeserializer/test/DeserializeFill.cpp
+++ b/src/armnnDeserializer/test/DeserializeFill.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -34,7 +34,7 @@ struct FillFixture : public ParserFlatbuffersSerializeFixture
4
],
dataType: "Signed32",
- quantizationScale: 0.0
+ quantizationScale: 1.0
}
}
]
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index ae2d813fc0..ff96a4bb85 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -1318,10 +1318,10 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
params.m_CellToOutputWeights = &cellToOutputWeights;
const std::string layerName("lstm");
- armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32);
- armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32);
- armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32);
- armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32);
+ armnn::TensorInfo inputTensorInfo({ batchSize, inputSize }, armnn::DataType::Float32, 0.0f , 0);
+ armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits}, armnn::DataType::Float32, 0.0f , 0);
+ armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize }, armnn::DataType::Float32, 0.0f , 0);
+ armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * 4 }, armnn::DataType::Float32, 0.0f , 0);
VerifyLstmLayer<armnn::LstmDescriptor> checker(
layerName,
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3998ee730d..90d778991b 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -1553,7 +1553,7 @@ TEST_CASE("EnsureL2NormalizationBackwardCompatibility")
CHECK(deserializedNetwork);
const std::string layerName("l2Normalization");
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 2, 1, 5}, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 2, 1, 5}, armnn::DataType::Float32, 0.0f, 0);
armnn::L2NormalizationDescriptor desc;
desc.m_DataLayout = armnn::DataLayout::NCHW;
@@ -1805,8 +1805,8 @@ TEST_CASE("EnsureMergerLayerBackwardCompatibility")
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(mergerModel.begin(), mergerModel.end()));
CHECK(deserializedNetwork);
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 4, 3, 2, 2 }, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32, 0.0f, 0);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 4, 3, 2, 2 }, armnn::DataType::Float32, 0.0f, 0);
const std::vector<armnn::TensorShape> shapes({inputInfo.GetShape(), inputInfo.GetShape()});
@@ -2071,8 +2071,8 @@ TEST_CASE("EnsurePadBackwardCompatibility")
armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(padModel.begin(), padModel.end()));
CHECK(deserializedNetwork);
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 1, 3, 5, 7 }, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32, 0.0f, 0);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 1, 3, 5, 7 }, armnn::DataType::Float32, 0.0f, 0);
armnn::PadDescriptor descriptor({{ 0, 0 }, { 1, 0 }, { 1, 1 }, { 1, 2 }});
@@ -2441,8 +2441,8 @@ TEST_CASE("EnsureResizeBilinearBackwardCompatibility")
DeserializeNetwork(std::string(resizeBilinearModel.begin(), resizeBilinearModel.end()));
CHECK(deserializedNetwork);
- const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
- const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
+ const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32, 0.0f, 0);
+ const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32, 0.0f, 0);
armnn::ResizeDescriptor descriptor;
descriptor.m_TargetWidth = 4u;
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
index 5e11ab6258..637f035365 100644
--- a/src/armnnTestUtils/CreateWorkload.hpp
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -521,8 +521,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IW
layerDesc.m_BiasEnabled = false;
layerDesc.m_DataLayout = dataLayout;
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
@@ -585,8 +585,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlo
layerDesc.m_BiasEnabled = true;
layerDesc.m_DataLayout = dataLayout;
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
@@ -678,8 +678,8 @@ std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(a
layerDesc.m_BiasEnabled = true;
layerDesc.m_DataLayout = dataLayout;
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
@@ -1141,8 +1141,8 @@ std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(arm
Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
TensorShape biasShape = TensorShape{ 2 };
TensorShape weightShape = TensorShape{ 2, 3, 3, 3 };
@@ -1203,8 +1203,8 @@ std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolutio
layerDesc.m_BiasEnabled = false;
layerDesc.m_DataLayout = dataLayout;
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
TensorShape weightShape({1, 4, 4, 2});
TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
@@ -1257,8 +1257,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
weightsTensorInfo.SetConstant();
@@ -1302,8 +1302,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale);
@@ -1378,8 +1378,8 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadWeightsBiase
FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
// Creates extra layers with weights and biases as input layers.
Layer* const input = graph.AddLayer<InputLayer>(1, "input");
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index c787212359..ee4cadd216 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -524,7 +524,7 @@ armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
}
}
- float quantizationScale = 0.0f;
+ float quantizationScale = 1.0f;
int32_t quantizationOffset = 0;
if (tensorPtr->quantization.get())
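The parser fix mirrors the header default: a TFLite tensor that carries no quantization block now falls back to the identity scale. This matters because the quantize direction divides by the scale, so the old default of 0.0 was a latent division by zero. A hedged sketch of that formula; the Quantize helper is illustrative, not Arm NN code:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Quantization divides by the scale: q = round(real / scale) + offset.
    // With the old default scale of 0.0f this divides by zero; with the new
    // default of 1.0f it degenerates to a plain round, as expected for
    // "not quantized" int8/uint8 data.
    int32_t Quantize(float real, float scale, int32_t offset)
    {
        return static_cast<int32_t>(std::round(real / scale)) + offset;
    }

    int main()
    {
        std::cout << Quantize(7.2f, 1.0f, 0) << "\n"; // prints 7
        // Quantize(7.2f, 0.0f, 0) would divide by zero.
    }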
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 2f9f29c223..9d807d8c5d 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -288,10 +288,10 @@ TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions, "DetectionPostProcessG
armnn::TensorInfo scoresTensor(armnn::TensorShape({ 1, 6, 3 }), armnn::DataType::QAsymmU8,
0.00999999978f, 0);
- armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32, 0, 0);
- armnn::TensorInfo detectionClassesTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
- armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
- armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1} ), armnn::DataType::Float32, 0, 0);
+ armnn::TensorInfo detectionBoxesTensor(armnn::TensorShape({ 1, 3, 4 }), armnn::DataType::Float32);
+ armnn::TensorInfo detectionClassesTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32);
+ armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32);
+ armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1 } ), armnn::DataType::Float32);
CHECK(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
CHECK(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 2a09f6508f..6125e62cb2 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -494,7 +494,7 @@ TEST_CASE("LstmQueueDescriptor_Validate")
{
armnn::DataType dataType = armnn::DataType::Float32;
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
unsigned int batchSize = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 1ef47ddf7b..1dcbdfac9e 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -316,7 +316,7 @@ LayerTestResult<T, 4> ConstantLinearActivationTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0)
{
IgnoreUnused(memoryManager);
@@ -1226,7 +1226,7 @@ LayerTestResult<T, 4> CompareActivationTestImpl(
const armnn::ITensorHandleFactory& refTensorHandleFactory,
armnn::ActivationFunction f,
unsigned int batchSize = 5,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0)
{
IgnoreUnused(memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index f7519a73bc..35496ce3b2 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -172,7 +172,7 @@ LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
bool transposeWeights,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0)
{
unsigned int inputWidth = 1;
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 8d03ff6ea9..ff58506444 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -32,7 +32,7 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
const std::vector<float>& inputValues,
const std::vector<float>& expectedOutputValues,
armnn::InstanceNormalizationQueueDescriptor descriptor,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0)
{
IgnoreUnused(memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 2eaaeb5c9b..d666dcbeb1 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -33,7 +33,7 @@ void LstmUtilsVectorBatchVectorAddTestImpl(
std::vector<float>& expectedOutput,
armnn::TensorShape& expectedShape)
{
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
@@ -60,7 +60,7 @@ void LstmUtilsZeroVectorTestImpl(
std::vector<float>& expectedOutput,
armnn::TensorShape& expectedShape)
{
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo tensorInfo({vSize}, ArmnnType, qScale, qOffset );
@@ -89,7 +89,7 @@ void LstmUtilsMeanStddevNormalizationTestImpl(
std::vector<float>& expectedOutput,
armnn::TensorShape& expectedShape)
{
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
@@ -117,7 +117,7 @@ void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
std::vector<float>& expectedOutput,
armnn::TensorShape& expectedShape)
{
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
@@ -149,7 +149,7 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
const std::vector<T>& outputExpected,
const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputExpectedShape,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
@@ -345,7 +345,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
const armnn::ITensorHandleFactory& tensorHandleFactory,
const std::vector<T>& input,
const std::vector<T>& outputExpected,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
@@ -1020,7 +1020,7 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
const std::vector<T>& outputExpected,
const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputExpectedShape,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
@@ -1252,7 +1252,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
const armnn::ITensorHandleFactory& tensorHandleFactory,
const std::vector<T>& input,
const std::vector<T>& outputExpected,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index 9e9c29b088..9e3d83c0f4 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -22,7 +22,7 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::ITensorHandleFactory& tensorHandleFactory,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0)
{
IgnoreUnused(memoryManager);
diff --git a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
index 6effa9c85d..4a63d39800 100644
--- a/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -26,7 +26,7 @@ UnidirectionalSequenceLstmTimeMajorSingleBatchTestImpl(
const std::vector<T>& outputExpected,
const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputExpectedShape,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
@@ -222,7 +222,7 @@ LayerTestResult<T, 3> UnidirectionalSequenceLstmLayerFloat32TestImpl(
const std::vector<T>& outputExpected,
const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputExpectedShape,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32) {
IgnoreUnused(memoryManager);
@@ -411,7 +411,7 @@ UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl(
const std::vector<T>& outputExpected,
const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputExpectedShape,
- float qScale = 0.0f,
+ float qScale = 1.0f,
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32) {
IgnoreUnused(memoryManager);
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 66718cc481..96429a84e1 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -357,8 +357,8 @@ static void NeonCreateFullyConnectedWorkloadTest()
auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0;
CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
}
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index c46a9e5bac..894dd75ef2 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
@@ -479,8 +479,8 @@ TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest")
armnn::DataType::Float32>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
- float inputsQScale = 0.0f;
- float outputQScale = 0.0f;
+ float inputsQScale = 1.0f;
+ float outputQScale = 1.0f;
CheckInputOutput(std::move(workload),
TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale),
TensorInfo({ 3, 7 }, armnn::DataType::Float32, outputQScale));
@@ -496,8 +496,8 @@ TEST_CASE("CreateFullyConnectedWorkloadWeightsBiasesAsInputsFloat32")
armnn::DataType::Float32>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
- float inputsQScale = 0.0f;
- float outputQScale = 0.0f;
+ float inputsQScale = 1.0f;
+ float outputQScale = 1.0f;
CheckInputsOutput(std::move(workload),
TensorInfo({ 3, 1, 4, 5 }, armnn::DataType::Float32, inputsQScale),
TensorInfo({ 7, 20 }, armnn::DataType::Float32, inputsQScale),
@@ -512,8 +512,8 @@ static void RefCreateFullyConnectedWorkloadTest()
auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 1.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 1.0f;
CheckInputOutput(std::move(workload),
TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
TensorInfo({ 3, 7 }, DataType, outputQScale));