From a3d7fad1dc3c29ab614f6f277aeea74dc67d219a Mon Sep 17 00:00:00 2001
From: Ellen Norris-Thompson
Date: Mon, 5 Aug 2019 14:20:32 +0100
Subject: IVGCVSW-3613 Add Quantized LSTM test to Android NN Driver

Signed-off-by: Ellen Norris-Thompson
Change-Id: Ie3c535789186571df9000d75ad2a62ac8c94dcb4
---
 1.2/HalPolicy.cpp | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

(limited to '1.2')

diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 3cd7ce48..e08ae84f 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -849,73 +849,73 @@ bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& mo
     // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
     // LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin inputToInputWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 0, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 1, model, data);
 
     // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
     // LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin inputToForgetWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 1, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 2, model, data);
 
     // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
     // LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin inputToCellWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 3, model, data);
 
     // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
     // LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin inputToOutputWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 3, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 4, model, data);
 
     // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
     // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin recurrentToInputWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 4, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 5, model, data);
 
     // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
     // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin recurrentToForgetWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 5, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
 
     // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
     // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin recurrentToCellWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 7, model, data);
 
     // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
     // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
     // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
     const ConstTensorPin recurrentToOutputWeightsPin =
-        ConvertOperationInputToConstTensorPin(operation, 7, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
 
     // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
     // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
     // of input and weights scales and zeroPoint equal to 0.
     const ConstTensorPin inputGateBiasPin =
-        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 9, model, data);
 
     // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
     // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
     // of input and weights scales and zeroPoint equal to 0.
     const ConstTensorPin forgetGateBiasPin =
-        ConvertOperationInputToConstTensorPin(operation, 9, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 10, model, data);
 
     // 11:The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
     // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
     // and weights scales and zeroPoint equal to 0.
     const ConstTensorPin cellBiasPin =
-        ConvertOperationInputToConstTensorPin(operation, 10, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 11, model, data);
 
     // 12:The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
     // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
     // of input and weights scales and zeroPoint equal to 0.
     const ConstTensorPin outputGateBiasPin =
-        ConvertOperationInputToConstTensorPin(operation, 11, model, data);
+        ConvertOperationInputToConstTensorPin(operation, 12, model, data);
 
     if (!inputToInputWeightsPin.IsValid() ||
         !inputToForgetWeightsPin.IsValid() ||
@@ -1014,8 +1014,8 @@ bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& mo
     armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
 
     input.Connect(layer->GetInputSlot(0));
-    previousOutputIn.Connect(layer->GetInputSlot(1));
-    previousCellStateIn.Connect(layer->GetInputSlot(2));
+    previousCellStateIn.Connect(layer->GetInputSlot(1));
+    previousOutputIn.Connect(layer->GetInputSlot(2));
 
     return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
             SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
--
cgit v1.2.1
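
Note on the two fixes above. Input 0 of ANEURALNETWORKS_QUANTIZED_16BIT_LSTM is
the runtime activation input, so the twelve constant weight and bias tensors
occupy operand indices 1-12 rather than 0-11. The sketch below is not driver
source; the enum and its names are illustrative, and the runtime state indices
13 and 14 reflect the NNAPI operand layout as I read it.

    #include <cstdint>

    // Operand indices of ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, matching the
    // corrected ConvertOperationInputToConstTensorPin calls in the patch.
    enum class QuantizedLstmOperand : uint32_t
    {
        Input                    = 0,  // runtime input; constants start at 1, not 0
        InputToInputWeights      = 1,
        InputToForgetWeights     = 2,
        InputToCellWeights       = 3,
        InputToOutputWeights     = 4,
        RecurrentToInputWeights  = 5,
        RecurrentToForgetWeights = 6,
        RecurrentToCellWeights   = 7,
        RecurrentToOutputWeights = 8,
        InputGateBias            = 9,
        ForgetGateBias           = 10,
        CellBias                 = 11,
        OutputGateBias           = 12,
        PreviousCellStateIn      = 13, // runtime state inputs follow the constants
        PreviousOutputIn         = 14
    };

The second hunk records the QuantizedLstm layer's slot order on the ArmNN side:
slot 0 takes the input, slot 1 the previous cell state and slot 2 the previous
output. Connecting the two state tensors the other way round still compiles and
runs, so the mix-up only surfaces as wrong numerical results once the Quantized
LSTM test exercises the layer.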