From 6a903a78a637f240a5a5a13fffa36fd0cfbdcf7d Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Tue, 26 May 2020 10:41:54 +0100 Subject: IVGCVSW-4850 Create QLSTM unit test in android-nn-driver * Added QLSTM unit tests Signed-off-by: Sadik Armagan Change-Id: Ibb7587d8a4fae4a630e7e80f4c3ce830665a7c77 --- test/1.3/QLstm.cpp | 879 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 879 insertions(+) create mode 100644 test/1.3/QLstm.cpp (limited to 'test/1.3') diff --git a/test/1.3/QLstm.cpp b/test/1.3/QLstm.cpp new file mode 100644 index 00000000..b4308d2c --- /dev/null +++ b/test/1.3/QLstm.cpp @@ -0,0 +1,879 @@ +// +// Copyright © 2020 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "../DriverTestHelpers.hpp" +#include "../TestTensor.hpp" + +#include "../1.3/HalPolicy.hpp" + +#include + +#include +#include +#include +#include + +BOOST_AUTO_TEST_SUITE(QLSTMTests) + +using ArmnnDriver = armnn_driver::ArmnnDriver; +using DriverOptions = armnn_driver::DriverOptions; + +using namespace driverTestHelpers; +using namespace android::hardware; + +using HalPolicy = hal_1_3::HalPolicy; + +namespace +{ + +template +RequestArgument CreateRequestArgument(const std::vector& value, unsigned int poolIndex) +{ + DataLocation inputInloc = {}; + inputInloc.poolIndex = poolIndex; + inputInloc.offset = 0; + inputInloc.length = value.size() * sizeof(T); + RequestArgument inputRequestArgument = {}; + inputRequestArgument.location = inputInloc; + inputRequestArgument.dimensions = hidl_vec{}; + return inputRequestArgument; +} + +// Returns true if the relative difference between two float values is less than the tolerance value given. +// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work! +bool TolerantCompareEqual(float a, float b, float tolerance = 1.0f) +{ + float rd; + if (a == 0.0f) + { + rd = fabs(b); + } + else if (b == 0.0f) + { + rd = fabs(a); + } + else + { + rd = boost::math::relative_difference(a, b); + } + return rd < tolerance; +} + +// Helper function to create an OperandLifeTime::NO_VALUE for testing. +// To be used on optional input operands that have no values - these are valid and should be tested. +HalPolicy::OperandLifeTime CreateNoValueLifeTime(const hidl_vec& dimensions) +{ + // Only create a NO_VALUE for optional operands that have no elements + if (dimensions.size() == 0 || dimensions[0] == 0) + { + return HalPolicy::OperandLifeTime::NO_VALUE; + } + return HalPolicy::OperandLifeTime::CONSTANT_COPY; +} + +void ExecuteModel(const armnn_driver::hal_1_3::HalPolicy::Model& model, + armnn_driver::ArmnnDriver& driver, + const V1_0::Request& request) +{ + android::sp preparedModel = PrepareModel_1_3(model, driver); + if (preparedModel.get() != nullptr) + { + Execute(preparedModel, request); + } +} + +#ifndef ARMCOMPUTECL_ENABLED +static const boost::array COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }}; +#else +static const boost::array COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }}; +#endif + +// Add our own tests here since we skip the qlstm tests which Google supplies (because of non-const weights) +void QLstmTestImpl(const hidl_vec& inputDimensions, + const std::vector& inputValue, + const hidl_vec& inputToInputWeightsDimensions, + const std::vector& inputToInputWeightsValue, + const hidl_vec& inputToForgetWeightsDimensions, + const std::vector& inputToForgetWeightsValue, + const hidl_vec& inputToCellWeightsDimensions, + const std::vector& inputToCellWeightsValue, + const hidl_vec& inputToOutputWeightsDimensions, + const std::vector& inputToOutputWeightsValue, + const hidl_vec& recurrentToInputWeightsDimensions, + const std::vector& recurrentToInputWeightsValue, + const hidl_vec& recurrentToForgetWeightsDimensions, + const std::vector& recurrentToForgetWeightsValue, + const hidl_vec& recurrentToCellWeightsDimensions, + const std::vector& recurrentToCellWeightsValue, + const hidl_vec& recurrentToOutputWeightsDimensions, + const std::vector& recurrentToOutputWeightsValue, + const hidl_vec& cellToInputWeightsDimensions, + const std::vector& cellToInputWeightsValue, + const hidl_vec& cellToForgetWeightsDimensions, + const std::vector& cellToForgetWeightsValue, + const hidl_vec& cellToOutputWeightsDimensions, + const std::vector& cellToOutputWeightsValue, + const hidl_vec& inputGateBiasDimensions, + const std::vector& inputGateBiasValue, + const hidl_vec& forgetGateBiasDimensions, + const std::vector& forgetGateBiasValue, + const hidl_vec& cellBiasDimensions, + const std::vector& cellBiasValue, + const hidl_vec& outputGateBiasDimensions, + const std::vector& outputGateBiasValue, + const hidl_vec& projectionWeightsDimensions, + const std::vector& projectionWeightsValue, + const hidl_vec& projectionBiasDimensions, + const std::vector& projectionBiasValue, + const hidl_vec& outputPreviousTimeStepInDimensions, + const std::vector& outputPreviousTimeStepInValue, + const hidl_vec& cellStatePreviousTimeStepInDimensions, + const std::vector& cellStatePreviousTimeStepInValue, + const hidl_vec& inputLayerNormWeightsDimensions, + const std::vector& inputLayerNormWeightsValue, + const hidl_vec& forgetLayerNormWeightsDimensions, + const std::vector& forgetLayerNormWeightsValue, + const hidl_vec& cellLayerNormWeightsDimensions, + const std::vector& cellLayerNormWeightsValue, + const hidl_vec& outputLayerNormWeightsDimensions, + const std::vector& outputLayerNormWeightsValue, + const float& cellClipValue, + const float& projectionClipValue, + const float& matMulInputGateValue, + const float& matMulForgetGateValue, + const float& matMulCellGateValue, + const float& matMulOutputGateValue, + const int32_t& projInputZeroPointValue, + const float& projInputScaleValue, + const hidl_vec& outputStateOutDimensions, + const std::vector& outputStateOutValue, + const hidl_vec& cellStateOutDimensions, + const std::vector& cellStateOutValue, + const hidl_vec& outputDimensions, + const std::vector& outputValue, + armnn::Compute compute) +{ + auto driver = std::make_unique(DriverOptions(compute)); + HalPolicy::Model model = {}; + + // Scale/Offset quantization info + float inputScale = 0.0078125f; + int32_t inputOffset = 0; + + int32_t hiddenStateZeroPoint = 0; + float hiddenStateScale = 0.007f; + + float outputScale = hiddenStateScale; + int32_t outputOffset = hiddenStateZeroPoint; + + float cellStateScale = 3.05176e-05f; + float cellWeightsScale = 1.0f; + int32_t cellStateOffset = 0; + + float weightsScale = 0.00784314f; + int32_t weightsOffset = 0; + + float layerNormScale = 3.05182e-05f; + int32_t layerNormOffset = 0; + + float biasScale = layerNormScale / 1024; + int32_t biasOffset = 0; + + // Inputs: + // 00: The input to the LSTM cell. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, inputSize] + AddInputOperand(model, + inputDimensions, + HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + inputScale, + inputOffset); + + // 01: The input-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize] + AddTensorOperand(model, + inputToInputWeightsDimensions, + inputToInputWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(inputToInputWeightsDimensions), + weightsScale, + weightsOffset); + + // 02: The input-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize] + AddTensorOperand(model, + inputToForgetWeightsDimensions, + inputToForgetWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(inputToForgetWeightsDimensions), + weightsScale, + weightsOffset); + + // 03: The input-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize] + AddTensorOperand(model, + inputToCellWeightsDimensions, + inputToCellWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(inputToCellWeightsDimensions), + weightsScale, + weightsOffset); + + // 04: The input-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, inputSize] + AddTensorOperand(model, + inputToOutputWeightsDimensions, + inputToOutputWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(inputToOutputWeightsDimensions), + weightsScale, + weightsOffset); + + // 05: The recurrent-to-input weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM + // Shape: [numUnits, outputSize] + AddTensorOperand(model, + recurrentToInputWeightsDimensions, + recurrentToInputWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(recurrentToInputWeightsDimensions), + weightsScale, + weightsOffset); + + // 06: The recurrent-to-forget weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize] + AddTensorOperand(model, + recurrentToForgetWeightsDimensions, + recurrentToForgetWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(recurrentToForgetWeightsDimensions), + weightsScale, + weightsOffset); + + // 07: The recurrent-to-cell weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize] + AddTensorOperand(model, + recurrentToCellWeightsDimensions, + recurrentToCellWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(recurrentToCellWeightsDimensions), + weightsScale, + weightsOffset); + + // 08: The recurrent-to-output weights. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [numUnits, outputSize] + AddTensorOperand(model, + recurrentToOutputWeightsDimensions, + recurrentToOutputWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(recurrentToOutputWeightsDimensions), + weightsScale, + weightsOffset); + + // 09: The cell-to-input weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM + // Shape: [numUnits] + AddTensorOperand(model, + cellToInputWeightsDimensions, + cellToInputWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM , + CreateNoValueLifeTime(cellToInputWeightsDimensions), + cellWeightsScale, + weightsOffset); + + // 10: The cell-to-forget weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM + // Shape: [numUnits]. + AddTensorOperand(model, + cellToForgetWeightsDimensions, + cellToForgetWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + CreateNoValueLifeTime(cellToForgetWeightsDimensions), + cellWeightsScale, + weightsOffset); + + // 11: The cell-to-output weights (for peephole). Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM + // Shape: [numUnits] + AddTensorOperand(model, + cellToOutputWeightsDimensions, + cellToOutputWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + CreateNoValueLifeTime(cellToOutputWeightsDimensions), + cellWeightsScale, + weightsOffset); + + // 12: The input gate bias. Quantized with scale being the product of input and weights scales + // and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits] + AddTensorOperand(model, + inputGateBiasDimensions, + inputGateBiasValue, + HalPolicy::OperandType::TENSOR_INT32, + CreateNoValueLifeTime(inputGateBiasDimensions), + biasScale, + biasOffset); + + // 13: The forget gate bias. Quantized with scale being the product of input and weights scales + // and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits] + AddTensorOperand(model, + forgetGateBiasDimensions, + forgetGateBiasValue, + HalPolicy::OperandType::TENSOR_INT32, + CreateNoValueLifeTime(forgetGateBiasDimensions), + biasScale, + biasOffset); + + // 14: The cell bias. Quantized with scale being the product of input and weights scales and zeroPoint equal to 0. + // Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits] + AddTensorOperand(model, + cellBiasDimensions, + cellBiasValue, + HalPolicy::OperandType::TENSOR_INT32, + CreateNoValueLifeTime(cellBiasDimensions), + biasScale, + biasOffset); + + // 15: The output gate bias. Quantized with scale being the product of input and weights scales + // and zeroPoint equal to 0. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [numUnits] + AddTensorOperand(model, + outputGateBiasDimensions, + outputGateBiasValue, + HalPolicy::OperandType::TENSOR_INT32, + CreateNoValueLifeTime(outputGateBiasDimensions), + biasScale, + biasOffset); + + // 16: The projection weights. Optional. Type: ANEURALNETWORKS_TENSOR_QUANT8_SYMM Shape: [outputSize, numUnits] + AddTensorOperand(model, + projectionWeightsDimensions, + projectionWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT8_SYMM, + CreateNoValueLifeTime(projectionWeightsDimensions), + 0.00392157f, + weightsOffset); + + // 17: The projection bias. Quantized with scale being the product of input and weights scales + // and zeroPoint equal to 0. Optional. Type: ANEURALNETWORKS_TENSOR_INT32 Shape: [outputSize] + AddTensorOperand(model, + projectionBiasDimensions, + projectionBiasValue, + HalPolicy::OperandType::TENSOR_INT32, + CreateNoValueLifeTime(projectionBiasDimensions), + 0.0f, + biasOffset); + + // 18: The output from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED + // Shape: [batchSize, outputSize] + AddInputOperand(model, + outputPreviousTimeStepInDimensions, + HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + cellStateScale, + inputOffset); + + // 19: The cell state from the previous time step. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM + // Shape: [batchSize, numUnits] + AddInputOperand(model, + cellStatePreviousTimeStepInDimensions, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + cellStateScale, + cellStateOffset); + + // If any of the tensors have a value all normalization tensors are set + if (!inputLayerNormWeightsValue.empty() || + !forgetLayerNormWeightsValue.empty() || + !cellLayerNormWeightsValue.empty() || + !outputLayerNormWeightsValue.empty()) + { + // Normalization: + // 20: The input layer normalization weights. Used to rescale normalized inputs to activation at input gate. + // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits] + AddTensorOperand(model, + inputLayerNormWeightsDimensions, + inputLayerNormWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + CreateNoValueLifeTime(inputLayerNormWeightsDimensions), + layerNormScale, + layerNormOffset); + + // 21: The forget layer normalization weights. Used to rescale normalized inputs to activation at forget gate. + // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits] + AddTensorOperand(model, + forgetLayerNormWeightsDimensions, + forgetLayerNormWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + CreateNoValueLifeTime(forgetLayerNormWeightsDimensions), + layerNormScale, + layerNormOffset); + + // 22: The cell layer normalization weights. Used to rescale normalized inputs to activation at cell gate. + // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits] + AddTensorOperand(model, + cellLayerNormWeightsDimensions, + cellLayerNormWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + CreateNoValueLifeTime(cellLayerNormWeightsDimensions), + layerNormScale, + layerNormOffset); + + // 23: The output layer normalization weights. Used to rescale normalized inputs to activation at output gate. + // Optional. Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [numUnits] + AddTensorOperand(model, + outputLayerNormWeightsDimensions, + outputLayerNormWeightsValue, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + CreateNoValueLifeTime(outputLayerNormWeightsDimensions), + layerNormScale, + layerNormOffset); + } + + // Constant scalar values + // 24: The cell clip. If provided the cell state is clipped by this value prior to the cell output activation. + // Optional. Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, cellClipValue); + + // Constant scalar values + // 25: The projection clip. If provided and projection is enabled, this is used for clipping the projected values. + // Optional. Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, projectionClipValue); + + // Constant scalar values + // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate. + // Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, matMulInputGateValue); + + // Constant scalar values + // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate. + // Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, matMulForgetGateValue); + + // Constant scalar values + // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate. + // Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, matMulCellGateValue); + + // Constant scalar values + // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate. + // Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, matMulOutputGateValue); + + // Constant scalar values + // 30: The zero point of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_INT32. + AddIntOperand(model, projInputZeroPointValue); + + // Constant scalar values + // 31: The scale of the hidden state, i.e. input to projection. Type: ANEURALNETWORKS_FLOAT32. + AddFloatOperand(model, projInputScaleValue); + + // Outputs: + // 0: The output state (out). Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize] + AddOutputOperand(model, + outputStateOutDimensions, + HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + cellStateScale, + cellStateScale); + + // 1: The cell state (out). Type: ANEURALNETWORKS_TENSOR_QUANT16_SYMM Shape: [batchSize, numUnits]. + AddOutputOperand(model, + cellStateOutDimensions, + HalPolicy::OperandType::TENSOR_QUANT16_SYMM, + cellStateScale, + cellStateOffset); + + // 2: The output. This is effectively the same as the current "output state (out)" value. + // Type: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED Shape: [batchSize, outputSize] + AddOutputOperand(model, + outputDimensions, + HalPolicy::OperandType::TENSOR_QUANT8_ASYMM_SIGNED, + cellStateScale, + cellStateScale); + + // make the QUANTIZED_LSTM operation + model.main.operations.resize(1); + model.main.operations[0].type = HalPolicy::OperationType::QUANTIZED_LSTM; + + model.main.operations[0].inputs = hidl_vec { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31}; + model.main.operations[0].outputs = hidl_vec {32, 33, 34}; + + // define the input values + hidl_vec inputArguments; + inputArguments.resize(3); + + inputArguments[0] = CreateRequestArgument(inputValue, 0); + inputArguments[1] = CreateRequestArgument(outputPreviousTimeStepInValue, 1); + inputArguments[2] = CreateRequestArgument(cellStatePreviousTimeStepInValue, 2); + + // define the expected output values + hidl_vec outputArguments; + outputArguments.resize(3); + + outputArguments[0] = CreateRequestArgument(outputStateOutValue, 3); + outputArguments[1] = CreateRequestArgument(cellStateOutValue, 4); + outputArguments[2] = CreateRequestArgument(outputValue, 5); + + android::hardware::neuralnetworks::V1_0::Request request = {}; + request.inputs = inputArguments; + request.outputs = outputArguments; + + // set the input data + AddPoolAndSetData(inputValue.size(), request, inputValue.data()); + AddPoolAndSetData(outputPreviousTimeStepInValue.size(), request, outputPreviousTimeStepInValue.data()); + AddPoolAndSetData(cellStatePreviousTimeStepInValue.size(), request, cellStatePreviousTimeStepInValue.data()); + + // add memory for the outputs + android::sp outputStateOutMemory = AddPoolAndGetData(outputStateOutValue.size(), request); + int8_t* outputStateOutData = static_cast(static_cast(outputStateOutMemory->getPointer())); + + android::sp cellStateOutMemory = AddPoolAndGetData(cellStateOutValue.size(), request); + int16_t* cellStateOutData = static_cast(static_cast(cellStateOutMemory->getPointer())); + + android::sp outputMemory = AddPoolAndGetData(outputValue.size(), request); + int8_t* outputData = static_cast(static_cast(outputMemory->getPointer())); + + // make the prepared model and run the execution + ExecuteModel(model, *driver, request); + + // check the results + for (size_t i = 0; i < outputStateOutValue.size(); ++i) + { + BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]), + "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]); + } + + // CELL STATE OUTPUT Does not match currently: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM + // Comment out for now + // for (size_t i = 0; i < cellStateOutValue.size(); ++i) + // { + // BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]), + // "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]); + //} + + for (size_t i = 0; i < outputValue.size(); ++i) + { + BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]), + "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]); + } +} + +void QLstmWithProjection(armnn::Compute compute) +{ + // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_projection.mod.py + // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_projection.example.cpp + // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors). + + uint32_t batchSize = 2; + uint32_t inputSize = 5; + uint32_t outputSize = 3; + uint32_t numUnits = 4; + + // Inputs: + hidl_vec inputDimensions{batchSize, inputSize}; + std::vector inputValue{ 90, 102, 13, 26, 38, 102, 13, 26, 51, 64}; + + hidl_vec inputToInputWeightsDimensions{numUnits, inputSize}; + std::vector inputToInputWeightsValue{ 64, 77, 89, -102, + -115, 13, 25, 38, + -51, 64, -102, 89, + -77, 64, -51, -64, + -51, -38, -25, -13 }; + + hidl_vec inputToForgetWeightsDimensions{numUnits, inputSize}; + std::vector inputToForgetWeightsValue{ -77, -13, 38, 25, + 115, -64, -25, -51, + 38, -102, -51, 38, + -64, -51, -77, 38, + -51, -77, -64, -64 }; + + hidl_vec inputToCellWeightsDimensions{numUnits, inputSize}; + std::vector inputToCellWeightsValue{ -51, -38, -25, -13, + -64, 64, -25, -38, + -25, -77, 77, -13, + -51, -38, -89, 89, + -115, -64, 102, 77 }; + + hidl_vec inputToOutputWeightsDimensions{numUnits, inputSize}; + std::vector inputToOutputWeightsValue{ -102, -51, -25, -115, + -13, -89, 38, -38, + -102, -25, 77, -25, + 51, -89, -38, -64, + 13, 64, -77, -51 }; + + hidl_vec recurrentToInputWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToInputWeightsValue{ -25, -38, 51, 13, -64, 115, -25, -38, -89, 6, -25, -77 }; + + hidl_vec recurrentToForgetWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToForgetWeightsValue{ -64, -38, -64, -25, 77, 51, 115, 38, -13, 25, 64, 25 }; + + hidl_vec recurrentToCellWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToCellWeightsValue{ -38, 25, 13, -38, 102, -10, -25, 38, 102, -77, -13, 25 }; + + hidl_vec recurrentToOutputWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToOutputWeightsValue{ 38, -13, 13, -25, -64, -89, -25, -77, -13, -51, -89, -25 }; + + hidl_vec cellToInputWeightsDimensions{0}; + std::vector cellToInputWeightsValue; + + hidl_vec cellToForgetWeightsDimensions{0}; + std::vector cellToForgetWeightsValue; + + hidl_vec cellToOutputWeightsDimensions{0}; + std::vector cellToOutputWeightsValue; + + hidl_vec inputGateBiasDimensions{numUnits}; + std::vector inputGateBiasValue{ 644245, 3221226, 4724464, 8160438 }; + + hidl_vec forgetGateBiasDimensions{numUnits}; + std::vector forgetGateBiasValue{ 2147484, -6442451, -4294968, 2147484 }; + + hidl_vec cellBiasDimensions{numUnits}; + std::vector cellBiasValue{-1073742, 15461883, 5368709, 1717987 }; + + hidl_vec outputGateBiasDimensions{numUnits}; + std::vector outputGateBiasValue{ 1073742, -214748, 4294968, 2147484 }; + + hidl_vec projectionWeightsDimensions{outputSize, numUnits}; + std::vector projectionWeightsValue{ -25, 51, 3, -51, 25, 127, 77, 20, 18, 51, -102, 51 }; + + hidl_vec projectionBiasDimensions{outputSize}; + std::vector projectionBiasValue{ 0, 0, 0 }; + + hidl_vec outputStateInDimensions{batchSize, outputSize}; + std::vector outputStateInValue{ 0, 0, 0, 0, 0, 0 }; + + hidl_vec cellStateInDimensions{batchSize, numUnits}; + std::vector cellStateInValue{ 0, 0, 0, 0, 0, 0, 0, 0 }; + + // Normalization: + hidl_vec inputLayerNormWeightsDimensions{numUnits}; + std::vector inputLayerNormWeightsValue{ 3277, 6553, 9830, 16384 }; + + hidl_vec forgetLayerNormWeightsDimensions{numUnits}; + std::vector forgetLayerNormWeightsValue{ 6553, 6553, 13107, 9830 }; + + hidl_vec cellLayerNormWeightsDimensions{numUnits}; + std::vector cellLayerNormWeightsValue{ 22937, 6553, 9830, 26214 }; + + hidl_vec outputLayerNormWeightsDimensions{numUnits}; + std::vector outputLayerNormWeightsValue{ 19660, 6553, 6553, 16384 }; + + float cellClipValue = 0.0f; + float projectionClipValue = 0.0f; + float inputIntermediateScale = 0.007059f; + float forgetIntermediateScale = 0.007812f; + float cellIntermediateScale = 0.007059f; + float outputIntermediateScale = 0.007812f; + int32_t hiddenStateZeroPoint = 0; + float hiddenStateScale = 0.007f; + + // Outputs: + hidl_vec outputStateOutDimensions{batchSize, outputSize}; + std::vector outputStateOutValue{ 127, 127, -108, -67, 127, 127 }; + + hidl_vec cellStateOutDimensions{batchSize, numUnits}; + std::vector cellStateOutValue { -14650, 8939, 5771, 6715, -11843, 7847, 1508, 12939 }; + + hidl_vec outputDimensions{batchSize, outputSize}; + std::vector outputValue { 127, 127, -108, -67, 127, 127 }; + + QLstmTestImpl(inputDimensions, inputValue, + inputToInputWeightsDimensions, inputToInputWeightsValue, + inputToForgetWeightsDimensions, inputToForgetWeightsValue, + inputToCellWeightsDimensions, inputToCellWeightsValue, + inputToOutputWeightsDimensions, inputToOutputWeightsValue, + recurrentToInputWeightsDimensions, recurrentToInputWeightsValue, + recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue, + recurrentToCellWeightsDimensions, recurrentToCellWeightsValue, + recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue, + cellToInputWeightsDimensions, cellToInputWeightsValue, + cellToForgetWeightsDimensions, cellToForgetWeightsValue, + cellToOutputWeightsDimensions, cellToOutputWeightsValue, + inputGateBiasDimensions, inputGateBiasValue, + forgetGateBiasDimensions, forgetGateBiasValue, + cellBiasDimensions, cellBiasValue, + outputGateBiasDimensions, outputGateBiasValue, + projectionWeightsDimensions, projectionWeightsValue, + projectionBiasDimensions, projectionBiasValue, + outputStateInDimensions, outputStateInValue, + cellStateInDimensions, cellStateInValue, + inputLayerNormWeightsDimensions, inputLayerNormWeightsValue, + forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue, + cellLayerNormWeightsDimensions, cellLayerNormWeightsValue, + outputLayerNormWeightsDimensions, outputLayerNormWeightsValue, + cellClipValue, + projectionClipValue, + inputIntermediateScale, + forgetIntermediateScale, + cellIntermediateScale, + outputIntermediateScale, + hiddenStateZeroPoint, + hiddenStateScale, + outputStateOutDimensions, outputStateOutValue, + cellStateOutDimensions, cellStateOutValue, + outputDimensions, outputValue, + compute); +} + +void QLstmWithNoProjection(armnn::Compute compute) +{ + // This replicates android/frameworks/ml/nn/runtime/test/specs/V1_3/qlstm_noprojection.mod.py + // with values from android/frameworks/ml/nn/runtime/test/generated/spec_V1_3/qlstm_noprojection.example.cpp + // and weights, biases and scalars passed as CONSTANT_COPY tensors (instead of SUBGRAPH_INPUT tensors). + + uint32_t batchSize = 2; + uint32_t inputSize = 5; + uint32_t outputSize = 4; + uint32_t numUnits = 4; + + // Inputs: + hidl_vec inputDimensions{batchSize, inputSize}; + std::vector inputValue { 90, 102, 13, 26, 38, 102, 13, 26, 51, 64 }; + + hidl_vec inputToInputWeightsDimensions{0, 0}; + std::vector inputToInputWeightsValue; + + hidl_vec inputToForgetWeightsDimensions{numUnits, inputSize}; + std::vector inputToForgetWeightsValue { -77, -13, 38, 25, 115, + -64, -25, -51, 38, -102, + -51, 38, -64, -51, -77, + 38, -51, -77, -64, -64 }; + + hidl_vec inputToCellWeightsDimensions{numUnits, inputSize}; + std::vector inputToCellWeightsValue { -51, -38, -25, -13, -64, + 64, -25, -38, -25, -77, + 77, -13, -51, -38, -89, + 89, -115, -64, 102, 77 }; + + hidl_vec inputToOutputWeightsDimensions{numUnits, inputSize}; + std::vector inputToOutputWeightsValue { -102, -51, -25, -115, -13, + -89, 38, -38, -102, -25, + 77, -25, 51, -89, -38, + -64, 13, 64, -77, -51 }; + + hidl_vec recurrentToInputWeightsDimensions{0, 0}; + std::vector recurrentToInputWeightsValue; + + hidl_vec recurrentToForgetWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToForgetWeightsValue { -64, -38, -64, -25, + 77, 51, 115, 38, + -13, 25, 64, 25, + 25, 38, -13, 51 }; + + hidl_vec recurrentToCellWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToCellWeightsValue { -38, 25, 13, -38, + 102, -10, -25, 38, + 102, -77, -13, 25, + 38, -13, 25, 64 }; + + hidl_vec recurrentToOutputWeightsDimensions{numUnits, outputSize}; + std::vector recurrentToOutputWeightsValue { 38, -13, 13, -25, + -64, -89, -25, -77, + -13, -51, -89, -25, + 13, 64, 25, -38 }; + + hidl_vec cellToInputWeightsDimensions{0}; + std::vector cellToInputWeightsValue; + + hidl_vec cellToForgetWeightsDimensions{0}; + std::vector cellToForgetWeightsValue; + + hidl_vec cellToOutputWeightsDimensions{0}; + std::vector cellToOutputWeightsValue; + + hidl_vec inputGateBiasDimensions{0}; + std::vector inputGateBiasValue; + + hidl_vec forgetGateBiasDimensions{numUnits}; + std::vector forgetGateBiasValue { 2147484, -6442451, -4294968, 2147484 }; + + hidl_vec cellBiasDimensions{numUnits}; + std::vector cellBiasValue { -1073742, 15461883, 5368709, 1717987 }; + + hidl_vec outputGateBiasDimensions{numUnits}; + std::vector outputGateBiasValue { 1073742, -214748, 4294968, 2147484 }; + + hidl_vec projectionWeightsDimensions{0, 0}; + std::vector projectionWeightsValue; + + hidl_vec projectionBiasDimensions{0}; + std::vector projectionBiasValue; + + hidl_vec outputStateInDimensions{batchSize, outputSize}; + std::vector outputStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 }; + + hidl_vec cellStateInDimensions{batchSize, numUnits}; + std::vector cellStateInValue { 0, 0, 0, 0, 0, 0, 0, 0 }; + + // Normalization: + hidl_vec inputLayerNormWeightsDimensions{0}; + std::vector inputLayerNormWeightsValue; + + hidl_vec forgetLayerNormWeightsDimensions{numUnits}; + std::vector forgetLayerNormWeightsValue { 6553, 6553, 13107, 9830 }; + + hidl_vec cellLayerNormWeightsDimensions{numUnits}; + std::vector cellLayerNormWeightsValue { 22937, 6553, 9830, 26214 }; + + hidl_vec outputLayerNormWeightsDimensions{numUnits}; + std::vector outputLayerNormWeightsValue { 19660, 6553, 6553, 16384 }; + + float cellClipValue = 0.0f; + float projectionClipValue = 0.0f; + float inputIntermediateScale = 0.007059f; + float forgetIntermediateScale = 0.007812f; + float cellIntermediateScale = 0.007059f; + float outputIntermediateScale = 0.007812f; + int32_t hiddenStateZeroPoint = 0; + float hiddenStateScale = 0.007f; + + // Outputs: + hidl_vec outputStateOutDimensions{batchSize, outputSize}; + std::vector outputStateOutValue { -15, 21, 14, 20, -15, 15, 5, 27 }; + + hidl_vec cellStateOutDimensions{batchSize, numUnits}; + std::vector cellStateOutValue { -11692, 9960, 5491, 8861, -9422, 7726, 2056, 13149 }; + + hidl_vec outputDimensions{batchSize, outputSize}; + std::vector outputValue { -15, 21, 14, 20, -15, 15, 5, 27 }; + + QLstmTestImpl(inputDimensions, inputValue, + inputToInputWeightsDimensions, inputToInputWeightsValue, + inputToForgetWeightsDimensions, inputToForgetWeightsValue, + inputToCellWeightsDimensions, inputToCellWeightsValue, + inputToOutputWeightsDimensions, inputToOutputWeightsValue, + recurrentToInputWeightsDimensions, recurrentToInputWeightsValue, + recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue, + recurrentToCellWeightsDimensions, recurrentToCellWeightsValue, + recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue, + cellToInputWeightsDimensions, cellToInputWeightsValue, + cellToForgetWeightsDimensions, cellToForgetWeightsValue, + cellToOutputWeightsDimensions, cellToOutputWeightsValue, + inputGateBiasDimensions, inputGateBiasValue, + forgetGateBiasDimensions, forgetGateBiasValue, + cellBiasDimensions, cellBiasValue, + outputGateBiasDimensions, outputGateBiasValue, + projectionWeightsDimensions, projectionWeightsValue, + projectionBiasDimensions, projectionBiasValue, + outputStateInDimensions, outputStateInValue, + cellStateInDimensions, cellStateInValue, + inputLayerNormWeightsDimensions, inputLayerNormWeightsValue, + forgetLayerNormWeightsDimensions, forgetLayerNormWeightsValue, + cellLayerNormWeightsDimensions, cellLayerNormWeightsValue, + outputLayerNormWeightsDimensions, outputLayerNormWeightsValue, + cellClipValue, + projectionClipValue, + inputIntermediateScale, + forgetIntermediateScale, + cellIntermediateScale, + outputIntermediateScale, + hiddenStateZeroPoint, + hiddenStateScale, + outputStateOutDimensions, outputStateOutValue, + cellStateOutDimensions, cellStateOutValue, + outputDimensions, outputValue, + compute); +} + +} // anonymous namespace + +BOOST_DATA_TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES) +{ + // Support is not added yet + // QLstmWithProjection(sample); +} + +BOOST_DATA_TEST_CASE(QLSTMWithNoProjectionTest, COMPUTE_DEVICES) +{ + QLstmWithNoProjection(sample); +} + +BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file -- cgit v1.2.1