diff options
author | Ellen Norris-Thompson <ellen.norris-thompson@arm.com> | 2019-08-05 14:20:32 +0100 |
---|---|---|
committer | Ellen Norris-Thompson <ellen.norris-thompson@arm.com> | 2019-08-22 11:18:39 +0100 |
commit | a3d7fad1dc3c29ab614f6f277aeea74dc67d219a (patch) | |
tree | 306957dc6c39ae52560b949944748a14c4b63023 /1.2 | |
parent | 976ad3e257c4a68e809f5a7fe3b7a99bb3f1fba4 (diff) | |
download | android-nn-driver-a3d7fad1dc3c29ab614f6f277aeea74dc67d219a.tar.gz |
IVGCVSW-3613 Add Quantized LSTM test to Android NN Driver
Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: Ie3c535789186571df9000d75ad2a62ac8c94dcb4
Diffstat (limited to '1.2')
-rw-r--r-- | 1.2/HalPolicy.cpp | 28 |
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp index 3cd7ce48..e08ae84f 100644 --- a/1.2/HalPolicy.cpp +++ b/1.2/HalPolicy.cpp @@ -849,73 +849,73 @@ bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& mo // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the // LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin inputToInputWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 0, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data); // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the // LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin inputToForgetWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data); // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the // LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin inputToCellWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data); // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the // LSTM cell. Quantization zero point and scale must be the same across all the weights. 
const ConstTensorPin inputToOutputWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data); // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside // the LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin recurrentToInputWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 5, model, data); // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside // the LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin recurrentToForgetWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 5, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data); // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside // the LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin recurrentToCellWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data); // 8: The recurrent-to-output weights. 
A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside // the LSTM cell. Quantization zero point and scale must be the same across all the weights. const ConstTensorPin recurrentToOutputWeightsPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data); // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product // of input and weights scales and zeroPoint equal to 0. const ConstTensorPin inputGateBiasPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 9, model, data); // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product // of input and weights scales and zeroPoint equal to 0. const ConstTensorPin forgetGateBiasPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 9, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 10, model, data); // 11:The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input // and weights scales and zeroPoint equal to 0. const ConstTensorPin cellBiasPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 10, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 11, model, data); // 12:The output gate bias. 
A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product // of input and weights scales and zeroPoint equal to 0. const ConstTensorPin outputGateBiasPin = - ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 11, model, data); + ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 12, model, data); if (!inputToInputWeightsPin.IsValid() || !inputToForgetWeightsPin.IsValid() || @@ -1014,8 +1014,8 @@ bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& mo armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm"); input.Connect(layer->GetInputSlot(0)); - previousOutputIn.Connect(layer->GetInputSlot(1)); - previousCellStateIn.Connect(layer->GetInputSlot(2)); + previousCellStateIn.Connect(layer->GetInputSlot(1)); + previousOutputIn.Connect(layer->GetInputSlot(2)); return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) && SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data)); |