author     Cathal Corbett <cathal.corbett@arm.com>   2021-11-10 12:50:57 +0000
committer  David Monahan <David.Monahan@arm.com>     2021-11-11 15:19:14 +0000
commit     7e4dc4729d2af8b554be52206fc89bbe1dc21882 (patch)
tree       ab463bf2b2abbca151f01ef7a91816633d2f7e24
parent     ed371231be5282c77c6c3591309345a0deb9e365 (diff)
download   armnn-7e4dc4729d2af8b554be52206fc89bbe1dc21882.tar.gz
Fix typo errors from ticket IVGCVSW-6420
* Typo errors from ticket 'Constant flag in tensor info is not set correctly',
  left unfixed in that ticket due to the code freeze deadline.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Id80ba60647d1970115a8cf200f0d71e4fada9b30
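Aside from two whitespace-only hunks, every change below reflows the same pattern:
the trailing isConstant argument of the armnn::TensorInfo constructor is moved onto
its own line so the flag is harder to overlook. A minimal sketch of that constructor
call, with illustrative shape and quantization values borrowed from the hunks below:

#include <armnn/Tensor.hpp>

armnn::TensorInfo MakeConstWeightsInfo()
{
    const float   weightsScale  = 0.5f;  // illustrative quantization scale
    const int32_t weightsOffset = 0;     // illustrative quantization offset
    return armnn::TensorInfo({4, 2},                     // weights shape
                             armnn::DataType::QAsymmU8,  // quantized data type
                             weightsScale,
                             weightsOffset,
                             true);                      // isConstant
}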
-rw-r--r--  python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i              2
-rw-r--r--  src/armnnSerializer/test/LstmSerializationTests.cpp                 72
-rw-r--r--  src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp          21
-rw-r--r--  src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp   6
-rw-r--r--  src/backends/cl/test/ClFallbackTests.cpp                             1
-rw-r--r--  src/backends/neon/test/NeonTensorHandleTests.cpp                     2
6 files changed, 68 insertions(+), 36 deletions(-)
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
index d8ef37d762..892b8e4f08 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
@@ -237,7 +237,7 @@ public:
%feature("docstring",
"
Sets the tensor info to be constant.
-
+
Args:
IsConstant (bool): Sets tensor info to constant.
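The docstring above belongs to the pyarmnn binding of TensorInfo::SetConstant, the
same setter the test fixes below rely on. A minimal C++ sketch of flagging an info
as constant after construction (the shape and data type are illustrative):

#include <armnn/Tensor.hpp>

void MarkInputConstant()
{
    armnn::TensorInfo inputInfo({1, 10}, armnn::DataType::Float32);
    inputInfo.SetConstant(true);  // the tensor's data is now declared constant
}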
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index 3178bc990e..d8f8967bcd 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -1454,7 +1454,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
armnn::TensorShape inputToForgetWeightsShape = {4, 2};
@@ -1462,7 +1463,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
armnn::TensorShape inputToCellWeightsShape = {4, 2};
@@ -1470,7 +1472,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
armnn::TensorShape inputToOutputWeightsShape = {4, 2};
@@ -1478,7 +1481,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
// The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
@@ -1487,7 +1491,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
@@ -1495,7 +1500,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
@@ -1503,7 +1509,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
@@ -1511,7 +1518,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
// The shape of bias data is {outputSize} = {4}
@@ -1520,7 +1528,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
armnn::TensorShape forgetGateBiasShape = {4};
@@ -1528,7 +1537,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
armnn::TensorShape cellBiasShape = {4};
@@ -1536,7 +1546,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo cellBiasInfo(cellBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
armnn::TensorShape outputGateBiasShape = {4};
@@ -1544,7 +1555,8 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
armnn::QuantizedLstmInputParams params;
@@ -1655,12 +1667,14 @@ TEST_CASE("SerializeDeserializeQLstmBasic")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset, true);
@@ -1816,22 +1830,26 @@ TEST_CASE("SerializeDeserializeQLstmCifgLayerNorm")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo biasInfo({numUnits},
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset, true);
+ layerNormOffset,
+ true);
// Mandatory params
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -2003,32 +2021,38 @@ TEST_CASE("SerializeDeserializeQLstmAdvanced")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo biasInfo({numUnits},
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
armnn::TensorInfo peepholeWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset, true);
+ layerNormOffset,
+ true);
armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
// Mandatory params
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
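Each reflowed info above is immediately wrapped in an armnn::ConstTensor, which is
why the trailing true matters: the constant flag on the TensorInfo marks the wrapped
data as constant. A hedged sketch of that pairing; the shape, scale and offset are
illustrative, and the caller's buffer (here 4 * 2 bytes of QAsymmU8 data) must
outlive the ConstTensor, which references rather than copies it:

#include <armnn/Tensor.hpp>
#include <cstdint>
#include <vector>

armnn::ConstTensor MakeConstWeights(const std::vector<uint8_t>& weightsData)
{
    armnn::TensorInfo weightsInfo({4, 2},
                                  armnn::DataType::QAsymmU8,
                                  0.5f,                    // illustrative scale
                                  0,                       // illustrative offset
                                  true);                   // isConstant set...
    return armnn::ConstTensor(weightsInfo, weightsData);   // ...before wrapping
}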
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index e2147fc59b..7c87f358d6 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -80,22 +80,26 @@ void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
const armnn::TensorInfo biasInfo({outputSize},
armnn::DataType::Signed32,
biasScale,
- biasOffset, true);
+ biasOffset,
+ true);
const armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset, true);
+ layerNormOffset,
+ true);
// Mandatory params
const std::vector<int8_t> inputToForgetWeightsVector =
@@ -179,17 +183,20 @@ void QLstmEndToEnd(const std::vector<armnn::BackendId>& backends)
const armnn::TensorInfo inputInfo({numBatches , inputSize},
armnn::DataType::QAsymmS8,
inputScale,
- inputOffset, true);
+ inputOffset,
+ true);
const armnn::TensorInfo cellStateInfo({numBatches , numUnits},
armnn::DataType::QSymmS16,
cellStateScale,
- cellStateOffset, true);
+ cellStateOffset,
+ true);
const armnn::TensorInfo outputStateInfo({numBatches , outputSize},
armnn::DataType::QAsymmS8,
outputScale,
- outputOffset, true);
+ outputOffset,
+ true);
// Input tensor data
const std::vector<int8_t> inputVector = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index f178951873..d481404f92 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -46,12 +46,14 @@ armnn::INetworkPtr CreateQuantizedLstmNetwork(armnn::TensorShape& inputShape,
armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset, true);
+ weightsOffset,
+ true);
armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true);
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 7cd05d193b..cfe2b369ac 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -540,7 +540,6 @@ TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
std::vector<float> expectedOutput{ 11.0f, -1.0f };
-
InputTensors inputTensors
{
{ 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 2e6854a331..685a0744e7 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -422,7 +422,7 @@ TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
inputTensorInfo.SetConstant(true);
inputTensors.push_back({it.first,
- ConstTensor(inputTensorInfo, it.second.data())});
+ ConstTensor(inputTensorInfo, it.second.data())});
}
OutputTensors outputTensors;
outputTensors.reserve(expectedOutputData.size());
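For context, the loop patched above follows a common pattern in these tests: fetch
the network's input info from the runtime, flag it constant, then wrap the caller's
raw buffer. A minimal sketch of one iteration, assuming a loaded network; the
runtime, networkIdentifier and inputData parameters stand in for values supplied by
the surrounding test:

#include <armnn/ArmNN.hpp>
#include <vector>

void AddInput(armnn::IRuntime* runtime,
              armnn::NetworkId networkIdentifier,
              const std::vector<float>& inputData,
              armnn::InputTensors& inputTensors)
{
    // Binding id 0 is illustrative; the test iterates over all input bindings.
    armnn::TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, 0);
    inputTensorInfo.SetConstant(true);  // inputs are wrapped as ConstTensor below
    inputTensors.push_back({0, armnn::ConstTensor(inputTensorInfo, inputData.data())});
}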