Diffstat (limited to 'src/armnnSerializer/test')
-rw-r--r--  src/armnnSerializer/test/ActivationSerializationTests.cpp    4
-rw-r--r--  src/armnnSerializer/test/LstmSerializationTests.cpp        118
-rw-r--r--  src/armnnSerializer/test/SerializerTests.cpp                 50
3 files changed, 87 insertions(+), 85 deletions(-)
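Every hunk below applies the same pattern: any armnn::TensorInfo that backs an armnn::ConstTensor (or a constant runtime input) is now flagged as constant, either via the trailing isConstant constructor argument or via SetConstant(true). A minimal standalone sketch of both forms, assuming only the TensorInfo overloads and ConstTensor usage visible in the hunks themselves:

// Sketch only; mirrors the pattern applied throughout these tests.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <vector>

void MarkTensorInfoConstant()
{
    // Constructor form: shape, data type, quantization scale, quantization offset, isConstant.
    armnn::TensorInfo weightsInfo({4, 2}, armnn::DataType::Float32, 0.0f, 0, true);
    std::vector<float> weightsData(weightsInfo.GetNumElements(), 0.0f);
    armnn::ConstTensor weights(weightsInfo, weightsData);

    // Mutator form, as used for the runtime input in ActivationSerializationTests.cpp.
    armnn::TensorInfo inputInfo({1, 4}, armnn::DataType::Float32);
    inputInfo.SetConstant(true);
}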
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index fb99e0bc3e..341752dd67 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -84,9 +84,11 @@ TEST_CASE("ActivationSerialization")
run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
std::vector<float> inputData {0.0f, -5.3f, 42.0f, -42.0f};
+ armnn::TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
+ inputTensorInfo.SetConstant(true);
armnn::InputTensors inputTensors
{
- {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+ {0, armnn::ConstTensor(inputTensorInfo, inputData.data())}
};
std::vector<float> expectedOutputData {0.0f, 0.0f, 42.0f, 0.0f};
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index bdc37877f7..3178bc990e 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -190,7 +190,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
const uint32_t numUnits = 4;
const uint32_t outputSize = numUnits;
- armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
@@ -200,7 +200,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
@@ -210,7 +210,7 @@ TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
@@ -304,7 +304,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -317,7 +317,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -330,7 +330,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -352,11 +352,11 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -451,7 +451,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -464,7 +464,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -477,7 +477,7 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -499,11 +499,11 @@ TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -1236,7 +1236,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
const uint32_t numUnits = 20u;
const uint32_t outputSize = 16u;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -1249,7 +1249,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
std::vector<float> inputToOutputWeightsData(tensorInfo20x5.GetNumElements(), 0.0f);
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -1262,7 +1262,7 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
std::vector<float> outputGateBiasData(tensorInfo20.GetNumElements(), 0.0f);
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData(tensorInfo20x16.GetNumElements(), 0.0f);
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -1284,11 +1284,11 @@ TEST_CASE("EnsureLstmLayersBackwardCompatibility")
std::vector<float> cellToOutputWeightsData(tensorInfo20.GetNumElements(), 0.0f);
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData(tensorInfo16x20.GetNumElements(), 0.0f);
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.0f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -1454,7 +1454,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
armnn::TensorShape inputToForgetWeightsShape = {4, 2};
@@ -1462,7 +1462,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
armnn::TensorShape inputToCellWeightsShape = {4, 2};
@@ -1470,7 +1470,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
armnn::TensorShape inputToOutputWeightsShape = {4, 2};
@@ -1478,7 +1478,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
// The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
@@ -1487,7 +1487,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
@@ -1495,7 +1495,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
@@ -1503,7 +1503,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
@@ -1511,7 +1511,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
armnn::DataType::QAsymmU8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
// The shape of bias data is {outputSize} = {4}
@@ -1520,7 +1520,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
armnn::TensorShape forgetGateBiasShape = {4};
@@ -1528,7 +1528,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
armnn::TensorShape cellBiasShape = {4};
@@ -1536,7 +1536,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo cellBiasInfo(cellBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
armnn::TensorShape outputGateBiasShape = {4};
@@ -1544,7 +1544,7 @@ TEST_CASE("SerializeDeserializeQuantizedLstm")
armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
armnn::QuantizedLstmInputParams params;
@@ -1655,14 +1655,14 @@ TEST_CASE("SerializeDeserializeQLstmBasic")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
- armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset);
+ armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset, true);
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
std::vector<int8_t> inputToCellWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -1816,22 +1816,22 @@ TEST_CASE("SerializeDeserializeQLstmCifgLayerNorm")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo biasInfo({numUnits},
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset);
+ layerNormOffset, true);
// Mandatory params
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -2003,32 +2003,32 @@ TEST_CASE("SerializeDeserializeQLstmAdvanced")
armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo biasInfo({numUnits},
armnn::DataType::Signed32,
biasScale,
- biasOffset);
+ biasOffset, true);
armnn::TensorInfo peepholeWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
armnn::TensorInfo layerNormWeightsInfo({numUnits},
armnn::DataType::QSymmS16,
layerNormScale,
- layerNormOffset);
+ layerNormOffset, true);
armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
armnn::DataType::QSymmS8,
weightsScale,
- weightsOffset);
+ weightsOffset, true);
// Mandatory params
std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -2213,7 +2213,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
const uint32_t numUnits = 4;
const uint32_t outputSize = numUnits;
- armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
@@ -2223,7 +2223,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
@@ -2233,7 +2233,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
@@ -2318,7 +2318,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -2331,7 +2331,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -2344,7 +2344,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -2366,11 +2366,11 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeAndPr
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -2456,7 +2456,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
const uint32_t numUnits = 20;
const uint32_t outputSize = 16;
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToInputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToInputWeights(tensorInfo20x5, inputToInputWeightsData);
@@ -2469,7 +2469,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(tensorInfo20x5.GetNumElements());
armnn::ConstTensor inputToOutputWeights(tensorInfo20x5, inputToOutputWeightsData);
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor inputGateBias(tensorInfo20, inputGateBiasData);
@@ -2482,7 +2482,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
std::vector<float> outputGateBiasData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor outputGateBias(tensorInfo20, outputGateBiasData);
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToInputWeightsData = GenerateRandomData<float>(tensorInfo20x16.GetNumElements());
armnn::ConstTensor recurrentToInputWeights(tensorInfo20x16, recurrentToInputWeightsData);
@@ -2504,11 +2504,11 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmNoCifgWithPeepholeWithP
std::vector<float> cellToOutputWeightsData = GenerateRandomData<float>(tensorInfo20.GetNumElements());
armnn::ConstTensor cellToOutputWeights(tensorInfo20, cellToOutputWeightsData);
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionWeightsData = GenerateRandomData<float>(tensorInfo16x20.GetNumElements());
armnn::ConstTensor projectionWeights(tensorInfo16x20, projectionWeightsData);
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> projectionBiasData(outputSize, 0.f);
armnn::ConstTensor projectionBias(tensorInfo16, projectionBiasData);
@@ -2611,7 +2611,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
const uint32_t numUnits = 4;
const uint32_t outputSize = numUnits;
- armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo1({numUnits, inputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> inputToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToForgetWeights(inputWeightsInfo1, inputToForgetWeightsData);
@@ -2621,7 +2621,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> inputToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo1.GetNumElements());
armnn::ConstTensor inputToOutputWeights(inputWeightsInfo1, inputToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo2({numUnits, outputSize}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> recurrentToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToForgetWeights(inputWeightsInfo2, recurrentToForgetWeightsData);
@@ -2631,7 +2631,7 @@ TEST_CASE("SerializeDeserializeUnidirectionalSequenceLstmCifgPeepholeNoProjectio
std::vector<float> recurrentToOutputWeightsData = GenerateRandomData<float>(inputWeightsInfo2.GetNumElements());
armnn::ConstTensor recurrentToOutputWeights(inputWeightsInfo2, recurrentToOutputWeightsData);
- armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo inputWeightsInfo3({numUnits}, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> cellToForgetWeightsData = GenerateRandomData<float>(inputWeightsInfo3.GetNumElements());
armnn::ConstTensor cellToForgetWeights(inputWeightsInfo3, cellToForgetWeightsData);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index e32b90837d..f4e25998d9 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -98,10 +98,10 @@ TEST_CASE("SerializeBatchNormalization")
const armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32);
- const armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32);
- const armnn::TensorInfo betaInfo({1}, armnn::DataType::Float32);
- const armnn::TensorInfo gammaInfo({1}, armnn::DataType::Float32);
+ const armnn::TensorInfo meanInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo varianceInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo betaInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo gammaInfo({1}, armnn::DataType::Float32, 0.0f, 0, true);
armnn::BatchNormalizationDescriptor descriptor;
descriptor.m_Eps = 0.0010000000475f;
@@ -307,7 +307,7 @@ TEST_CASE("SerializeConstant")
};
const std::string layerName("constant");
- const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> constantData = GenerateRandomData<float>(info.GetNumElements());
armnn::ConstTensor constTensor(info, constantData);
@@ -339,8 +339,8 @@ TEST_CASE("SerializeConvolution2d")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -395,10 +395,10 @@ TEST_CASE("SerializeConvolution2dWithPerAxisParams")
const std::vector<float> quantScales{ 0.75f, 0.65f, 0.85f };
constexpr unsigned int quantDimension = 0;
- const TensorInfo kernelInfo({ 3, 1, 1, 2 }, DataType::QSymmS8, quantScales, quantDimension);
+ const TensorInfo kernelInfo({ 3, 1, 1, 2 }, DataType::QSymmS8, quantScales, quantDimension, true);
const std::vector<float> biasQuantScales{ 0.25f, 0.50f, 0.75f };
- const TensorInfo biasInfo({ 3 }, DataType::Signed32, biasQuantScales, quantDimension);
+ const TensorInfo biasInfo({ 3 }, DataType::Signed32, biasQuantScales, quantDimension, true);
std::vector<int8_t> kernelData = GenerateRandomData<int8_t>(kernelInfo.GetNumElements());
armnn::ConstTensor weights(kernelInfo, kernelData);
@@ -445,8 +445,8 @@ TEST_CASE("SerializeConvolution3d")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 5, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 2, 2, 2, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 3, 3, 3, 1, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -530,8 +530,8 @@ TEST_CASE("SerializeDepthwiseConvolution2d")
const armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -585,11 +585,11 @@ TEST_CASE("SerializeDepthwiseConvolution2dWithPerAxisParams")
const std::vector<float> quantScales{ 0.75f, 0.80f, 0.90f, 0.95f };
const unsigned int quantDimension = 0;
- TensorInfo kernelInfo({ 2, 2, 2, 2 }, DataType::QSymmS8, quantScales, quantDimension);
+ TensorInfo kernelInfo({ 2, 2, 2, 2 }, DataType::QSymmS8, quantScales, quantDimension, true);
const std::vector<float> biasQuantScales{ 0.25f, 0.35f, 0.45f, 0.55f };
constexpr unsigned int biasQuantDimension = 0;
- TensorInfo biasInfo({ 4 }, DataType::Signed32, biasQuantScales, biasQuantDimension);
+ TensorInfo biasInfo({ 4 }, DataType::Signed32, biasQuantScales, biasQuantDimension, true);
std::vector<int8_t> kernelData = GenerateRandomData<int8_t>(kernelInfo.GetNumElements());
armnn::ConstTensor weights(kernelInfo, kernelData);
@@ -685,7 +685,7 @@ TEST_CASE("SerializeDeserializeDetectionPostProcess")
descriptor.m_ScaleH = 5.0;
descriptor.m_ScaleW = 5.0;
- const armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+ const armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32, 0.0f, 0, true);
const std::vector<float> anchorsData({
0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 1.0f, 1.0f,
@@ -913,8 +913,8 @@ TEST_CASE("SerializeFullyConnected")
const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -1003,8 +1003,8 @@ TEST_CASE("SerializeFullyConnectedWeightsAndBiasesAsConstantLayers")
const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 2, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 5, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
@@ -1077,7 +1077,7 @@ TEST_CASE("SerializeGather")
const std::string layerName("gather");
armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8);
armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8);
- const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
+ const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32, 0.0f, 0, true);
GatherDescriptor descriptor;
descriptor.m_Axis = 1;
@@ -2447,7 +2447,7 @@ TEST_CASE("SerializeSwitch")
};
const std::string layerName("switch");
- const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);
+ const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> constantData = GenerateRandomData<float>(info.GetNumElements());
armnn::ConstTensor constTensor(info, constantData);
@@ -2509,8 +2509,8 @@ TEST_CASE("SerializeTransposeConvolution2d")
const armnn::TensorInfo inputInfo ({ 1, 7, 7, 1 }, armnn::DataType::Float32);
const armnn::TensorInfo outputInfo({ 1, 9, 9, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
- const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32, 0.0f, 0, true);
+ const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
armnn::ConstTensor weights(weightsInfo, weightsData);
@@ -2594,7 +2594,7 @@ TEST_CASE("SerializeDeserializeNonLinearNetwork")
};
const std::string layerName("constant");
- const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32, 0.0f, 0, true);
std::vector<float> constantData = GenerateRandomData<float>(info.GetNumElements());
armnn::ConstTensor constTensor(info, constantData);