From 5b8093c17044e8eaaaa42d96ba4902dee5791be4 Mon Sep 17 00:00:00 2001
From: Cathal Corbett
Date: Fri, 22 Oct 2021 11:12:07 +0100
Subject: IVGCVSW-6420: Constant flag in tensor info is not set correctly

!android-nn-driver:6532
!armnn-internal-tests:372451

* Made fix to 2 out of 3 ConstTensor() constructors in Tensor.hpp to throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
* Added new ConstTensor() constructor in Tensor.cpp to accept vector<>.data() using template.
* Fixed runtime->GetOutputTensorInfo()/GetInputTensorInfo() methods and called submethods to return TensorInfo& rather than TensorInfo.
* Fixed all failing unit tests for CpuRef/CpuAcc/GpuAcc to ensure any ConstTensor created has its TensorInfo isConstant set to true.
* Added unit tests in TensorTest.cpp to ensure ConstTensor constructors throw InvalidArgumentException when the TensorInfo isConstant parameter is false.
* Added unit test to ensure an empty ConstTensor constructor will set TensorInfo isConstant to true.
* Indentation fixes.
* Fix to arm_tensor.i to add isConstant parameter to TensorInfo constructor. Added methods IsConstant() and SetConstant().
* Fix to const_tensor.py to throw ValueError when TensorInfo isConstant is set to false when constructing a ConstTensor.
* Fixed PyArmnn unit tests to set TensorInfo isConstant to True when ConstTensor is used.
* Added unit tests in test_const_tensor.py to ensure ConstTensor constructors throw ValueError when the TensorInfo isConstant parameter is false.

Signed-off-by: Cathal Corbett
Change-Id: I44e440dd0422c366d31bbdbc77ad2b4db0bde148
---
 .../test/ActivationSerializationTests.cpp    |   4 +-
 .../test/LstmSerializationTests.cpp          | 118 ++++++++++-----------
 src/armnnSerializer/test/SerializerTests.cpp |  50 ++++-----
 3 files changed, 87 insertions(+), 85 deletions(-)

(limited to 'src/armnnSerializer/test')

diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp index fb99e0bc3e..341752dd67 100644 --- a/src/armnnSerializer/test/ActivationSerializationTests.cpp +++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp @@ -84,9 +84,11 @@ TEST_CASE("ActivationSerialization") run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized)); std::vector inputData {0.0f, -5.3f, 42.0f, -42.0f}; + armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0); + inputTensorInfo.SetConstant(true); armnn::InputTensors inputTensors { - {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())} + {0, armnn::ConstTensor(inputTensorInfo, inputData.data())} }; std::vector expectedOutputData {0.0f, 0.0f, 42.0f, 0.0f}; diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp index bdc37877f7..3178bc990e 100644 --- a/src/armnnSerializer/test/LstmSerializationTests.cpp +++ b/src/armnnSerializer/test/LstmSerializationTests.cpp @@ -190,7 +190,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection") const uint32_t numUnits = 4; const uint32_t outputSize = numUnits; - armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData); @@ -200,7 +200,7 @@
TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection") std::vector inputToOutputWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData); - armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToForgetWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData); @@ -210,7 +210,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection") std::vector recurrentToOutputWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData); - armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector cellToForgetWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData); @@ -304,7 +304,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection") const uint32_t numUnits = 20; const uint32_t outputSize = 16; - armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToInputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData); @@ -317,7 +317,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection") std::vector inputToOutputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData); - armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData); @@ -330,7 +330,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection") std::vector outputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData); - armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToInputWeightsData = GenerateRandomData(tensorInfo20x16.GetNumElements()); armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData); @@ -352,11 +352,11 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection") std::vector cellToOutputWeightsData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData); - armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionWeightsData = GenerateRandomData(tensorInfo16x20.GetNumElements()); armnn::ConstTensor projectionWeights(tensorInfo16x20, 
projectionWeightsData); - armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionBiasData(outputSize, 0.f); armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData); @@ -451,7 +451,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm const uint32_t numUnits = 20; const uint32_t outputSize = 16; - armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToInputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData); @@ -464,7 +464,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm std::vector inputToOutputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData); - armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData); @@ -477,7 +477,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm std::vector outputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData); - armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToInputWeightsData = GenerateRandomData(tensorInfo20x16.GetNumElements()); armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData); @@ -499,11 +499,11 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm std::vector cellToOutputWeightsData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData); - armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionWeightsData = GenerateRandomData(tensorInfo16x20.GetNumElements()); armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData); - armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionBiasData(outputSize, 0.f); armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData); @@ -1236,7 +1236,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility") const uint32_t numUnits = 20u; const uint32_t outputSize = 16u; - armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f); armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData); @@ -1249,7 +1249,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility") std::vector 
inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f); armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData); - armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f); armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData); @@ -1262,7 +1262,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility") std::vector outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f); armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData); - armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f); armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData); @@ -1284,11 +1284,11 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility") std::vector cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f); armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData); - armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f); armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData); - armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionBiasData(outputSize, 0.0f); armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData); @@ -1454,7 +1454,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData); armnn::TensorShape inputToForgetWeightsShape = {4, 2}; @@ -1462,7 +1462,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData); armnn::TensorShape inputToCellWeightsShape = {4, 2}; @@ -1470,7 +1470,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData); armnn::TensorShape inputToOutputWeightsShape = {4, 2}; @@ -1478,7 +1478,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData); // The shape of recurrent weight data is {outputSize, outputSize} = {4, 4} @@ -1487,7 +1487,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + 
weightsOffset, true); armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData); armnn::TensorShape recurrentToForgetWeightsShape = {4, 4}; @@ -1495,7 +1495,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData); armnn::TensorShape recurrentToCellWeightsShape = {4, 4}; @@ -1503,7 +1503,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData); armnn::TensorShape recurrentToOutputWeightsShape = {4, 4}; @@ -1511,7 +1511,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape, armnn::DataType::QAsymmU8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData); // The shape of bias data is {outputSize} = {4} @@ -1520,7 +1520,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData); armnn::TensorShape forgetGateBiasShape = {4}; @@ -1528,7 +1528,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData); armnn::TensorShape cellBiasShape = {4}; @@ -1536,7 +1536,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo cellBiasInfo(cellBiasShape, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData); armnn::TensorShape outputGateBiasShape = {4}; @@ -1544,7 +1544,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm") armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData); armnn::QuantizedLstmInputParams params; @@ -1655,14 +1655,14 @@ TEST_CASE("SerializeDeserializeQLstmBasic") armnn::TensorInfo inputWeightsInfo({numUnits, inputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); - armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset); + armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset, true); std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo.GetNumElements()); std::vector inputToCellWeightsData = GenerateRandomData(inputWeightsInfo.GetNumElements()); @@ -1816,22 +1816,22 @@ TEST_CASE("SerializeDeserializeQLstmCifgLayerNorm") armnn::TensorInfo inputWeightsInfo({numUnits, inputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo 
recurrentWeightsInfo({numUnits, outputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, - layerNormOffset); + layerNormOffset, true); // Mandatory params std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo.GetNumElements()); @@ -2003,32 +2003,32 @@ TEST_CASE("SerializeDeserializeQLstmAdvanced") armnn::TensorInfo inputWeightsInfo({numUnits, inputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, - biasOffset); + biasOffset, true); armnn::TensorInfo peepholeWeightsInfo({numUnits}, armnn::DataType::QSymmS16, weightsScale, - weightsOffset); + weightsOffset, true); armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, - layerNormOffset); + layerNormOffset, true); armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits}, armnn::DataType::QSymmS8, weightsScale, - weightsOffset); + weightsOffset, true); // Mandatory params std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo.GetNumElements()); @@ -2213,7 +2213,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio const uint32_t numUnits = 4; const uint32_t outputSize = numUnits; - armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData); @@ -2223,7 +2223,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio std::vector inputToOutputWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData); - armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToForgetWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData); @@ -2233,7 +2233,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio std::vector recurrentToOutputWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData); - armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector cellToForgetWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData); @@ -2318,7 +2318,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr const uint32_t numUnits = 20; const uint32_t outputSize = 16; - armnn::TensorInfo 
tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToInputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData); @@ -2331,7 +2331,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr std::vector inputToOutputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData); - armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData); @@ -2344,7 +2344,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr std::vector outputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData); - armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToInputWeightsData = GenerateRandomData(tensorInfo20x16.GetNumElements()); armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData); @@ -2366,11 +2366,11 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr std::vector cellToOutputWeightsData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData); - armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionWeightsData = GenerateRandomData(tensorInfo16x20.GetNumElements()); armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData); - armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionBiasData(outputSize, 0.f); armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData); @@ -2456,7 +2456,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP const uint32_t numUnits = 20; const uint32_t outputSize = 16; - armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToInputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData); @@ -2469,7 +2469,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP std::vector inputToOutputWeightsData = GenerateRandomData(tensorInfo20x5.GetNumElements()); armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData); - armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor inputGateBias(tensorInfo20, 
inputGateBiasData); @@ -2482,7 +2482,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP std::vector outputGateBiasData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData); - armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToInputWeightsData = GenerateRandomData(tensorInfo20x16.GetNumElements()); armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData); @@ -2504,11 +2504,11 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP std::vector cellToOutputWeightsData = GenerateRandomData(tensorInfo20.GetNumElements()); armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData); - armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionWeightsData = GenerateRandomData(tensorInfo16x20.GetNumElements()); armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData); - armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32); + armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector projectionBiasData(outputSize, 0.f); armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData); @@ -2611,7 +2611,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio const uint32_t numUnits = 4; const uint32_t outputSize = numUnits; - armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector inputToForgetWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData); @@ -2621,7 +2621,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio std::vector inputToOutputWeightsData = GenerateRandomData(inputWeightsInfo1.GetNumElements()); armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData); - armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true); std::vector recurrentToForgetWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData); @@ -2631,7 +2631,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio std::vector recurrentToOutputWeightsData = GenerateRandomData(inputWeightsInfo2.GetNumElements()); armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData); - armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32); + armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true); std::vector cellToForgetWeightsData = GenerateRandomData(inputWeightsInfo3.GetNumElements()); armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData); diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp index e32b90837d..f4e25998d9 
100644 --- a/src/armnnSerializer/test/SerializerTests.cpp +++ b/src/armnnSerializer/test/SerializerTests.cpp @@ -98,10 +98,10 @@ TEST_CASE("SerializeBatchNormalization") const armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32); - const armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32); - const armnn::TensorInfo betaInfo({1}, armnn::DataType::Float32); - const armnn::TensorInfo gammaInfo({1}, armnn::DataType::Float32); + const armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo betaInfo({1}, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo gammaInfo({1}, armnn::DataType::Float32, 0.0f, 0, true); armnn::BatchNormalizationDescriptor descriptor; descriptor.m_Eps = 0.0010000000475f; @@ -307,7 +307,7 @@ TEST_CASE("SerializeConstant") }; const std::string layerName("constant"); - const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32); + const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector constantData = GenerateRandomData(info.GetNumElements()); armnn::ConstTensor constTensor(info, constantData); @@ -339,8 +339,8 @@ TEST_CASE("SerializeConvolution2d") const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -395,10 +395,10 @@ TEST_CASE("SerializeConvolution2dWithPerAxisParams") const std::vector quantScales{ 0.75f, 0.65f, 0.85f }; constexpr unsigned int quantDimension = 0; - const TensorInfo kernelInfo({ 3, 1, 1, 2 }, DataType::QSymmS8, quantScales, quantDimension); + const TensorInfo kernelInfo({ 3, 1, 1, 2 }, DataType::QSymmS8, quantScales, quantDimension, true); const std::vector biasQuantScales{ 0.25f, 0.50f, 0.75f }; - const TensorInfo biasInfo({ 3 }, DataType::Signed32, biasQuantScales, quantDimension); + const TensorInfo biasInfo({ 3 }, DataType::Signed32, biasQuantScales, quantDimension, true); std::vector kernelData = GenerateRandomData(kernelInfo.GetNumElements()); armnn::ConstTensor weights(kernelInfo, kernelData); @@ -445,8 +445,8 @@ TEST_CASE("SerializeConvolution3d") const armnn::TensorInfo inputInfo ({ 1, 5, 5, 5, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -530,8 +530,8 @@ TEST_CASE("SerializeDepthwiseConvolution2d") 
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -585,11 +585,11 @@ TEST_CASE("SerializeDepthwiseConvolution2dWithPerAxisParams") const std::vector quantScales{ 0.75f, 0.80f, 0.90f, 0.95f }; const unsigned int quantDimension = 0; - TensorInfo kernelInfo({ 2, 2, 2, 2 }, DataType::QSymmS8, quantScales, quantDimension); + TensorInfo kernelInfo({ 2, 2, 2, 2 }, DataType::QSymmS8, quantScales, quantDimension, true); const std::vector biasQuantScales{ 0.25f, 0.35f, 0.45f, 0.55f }; constexpr unsigned int biasQuantDimension = 0; - TensorInfo biasInfo({ 4 }, DataType::Signed32, biasQuantScales, biasQuantDimension); + TensorInfo biasInfo({ 4 }, DataType::Signed32, biasQuantScales, biasQuantDimension, true); std::vector kernelData = GenerateRandomData(kernelInfo.GetNumElements()); armnn::ConstTensor weights(kernelInfo, kernelData); @@ -685,7 +685,7 @@ TEST_CASE("SerializeDeserializeDetectionPostProcess") descriptor.m_ScaleH = 5.0; descriptor.m_ScaleW = 5.0; - const armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32); + const armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32, 0.0f, 0, true); const std::vector anchorsData({ 0.5f, 0.5f, 1.0f, 1.0f, 0.5f, 0.5f, 1.0f, 1.0f, @@ -913,8 +913,8 @@ TEST_CASE("SerializeFullyConnected") const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); std::vector biasesData = GenerateRandomData(biasesInfo.GetNumElements()); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -1003,8 +1003,8 @@ TEST_CASE("SerializeFullyConnectedWeightsAndBiasesAsConstantLayers") const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); std::vector biasesData = GenerateRandomData(biasesInfo.GetNumElements()); @@ -1077,7 +1077,7 @@ TEST_CASE("SerializeGather") const std::string layerName("gather"); armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8); armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8); - const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32); + const armnn::TensorInfo 
indicesInfo({ 3 }, armnn::DataType::Signed32, 0.0f, 0, true); GatherDescriptor descriptor; descriptor.m_Axis = 1; @@ -2447,7 +2447,7 @@ TEST_CASE("SerializeSwitch") }; const std::string layerName("switch"); - const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32); + const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector constantData = GenerateRandomData(info.GetNumElements()); armnn::ConstTensor constTensor(info, constantData); @@ -2509,8 +2509,8 @@ TEST_CASE("SerializeTransposeConvolution2d") const armnn::TensorInfo inputInfo ({ 1, 7, 7, 1 }, armnn::DataType::Float32); const armnn::TensorInfo outputInfo({ 1, 9, 9, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32); - const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32); + const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true); + const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector weightsData = GenerateRandomData(weightsInfo.GetNumElements()); armnn::ConstTensor weights(weightsInfo, weightsData); @@ -2594,7 +2594,7 @@ TEST_CASE("SerializeDeserializeNonLinearNetwork") }; const std::string layerName("constant"); - const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32); + const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32, 0.0f, 0, true); std::vector constantData = GenerateRandomData(info.GetNumElements()); armnn::ConstTensor constTensor(info, constantData); -- cgit v1.2.1
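Usage note (not part of the patch): every hunk above follows the same pattern introduced by IVGCVSW-6420 — a TensorInfo wrapped in a ConstTensor must be flagged constant, either by passing true as the trailing isConstant constructor argument or by calling SetConstant(true) on a copy of the runtime-provided info. The following is a minimal sketch of that pattern, assuming only the public armnn/Tensor.hpp API exercised by these tests; names and shapes are illustrative.

#include <armnn/Tensor.hpp>
#include <vector>

int main()
{
    // Constant weights: pass isConstant = true directly
    // (shape, data type, quantization scale, quantization offset, isConstant).
    armnn::TensorInfo weightsInfo({4, 2}, armnn::DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 1.0f);
    armnn::ConstTensor weights(weightsInfo, weightsData);

    // Input tensors: take a copy of the (runtime-provided) info and mark it
    // constant before wrapping the raw data pointer.
    armnn::TensorInfo inputInfo({1, 4}, armnn::DataType::Float32);
    inputInfo.SetConstant(true);
    std::vector<float> inputData {0.0f, -5.3f, 42.0f, -42.0f};
    armnn::ConstTensor input(inputInfo, inputData.data());

    // With isConstant left at false, either ConstTensor construction above
    // would throw armnn::InvalidArgumentException after this change.
    return 0;
}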