From 813f23049d73177edfc1f1cff71147c39f4b695e Mon Sep 17 00:00:00 2001
From: Sadik Armagan
Date: Tue, 19 May 2020 14:10:30 +0100
Subject: IVGCVSW-4453 Add Support for ANEURALNETWORKS_QLSTM to HAL 1.3 Driver

* Add QLSTM support for Android NN Driver
* Add overrideOutputInfo parameter to SetupAndTrackLayerOutputSlot
* Add optional condition to GetInputScalar
* Refactor Quantized 16 Bit LSTM impl

Change-Id: Ie8fa98ad5ee4a62174ef91ca80f1df62b7fde937
Signed-off-by: Keith Davis
Signed-off-by: Sadik Armagan
---
 ConversionUtils_1_2.hpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index c87cb030..29367f2f 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -1497,11 +1497,11 @@ bool ConvertQuantize(const HalOperation& operation, const HalModel& model, Conve
 
 template
-bool ConvertQuantizedLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)
+bool ConvertQuantized16BitLstm(const HalOperation& operation, const HalModel& model, ConversionData& data)
 {
     using HalOperand = typename HalPolicy::Operand;
 
-    ALOGV("HalPolicy::ConvertQuantizedLstm()");
+    ALOGV("HalPolicy::ConvertQuantized16BitLstm()");
 
     //Inputs:
     // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
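
Note (not part of the patch): the commit message adds an overrideOutputInfo
parameter to SetupAndTrackLayerOutputSlot so that a converter such as the new
QLSTM path can supply the output tensor metadata explicitly instead of having
it derived from the model. Below is a minimal, self-contained C++ sketch of
that optional-override pattern; the type, function name, and values are
illustrative assumptions, not code taken from this patch or from the armnn
driver sources.

    #include <iostream>
    #include <string>

    // Illustrative stand-in for a tensor descriptor; not the real armnn type.
    struct TensorInfo
    {
        std::string dataType;
        float       quantizationScale;
    };

    // Hypothetical helper mirroring the idea behind the new parameter:
    // 'overrideOutputInfo' defaults to nullptr, so existing call sites keep
    // their behaviour, while a QLSTM-style caller can pass an explicit
    // TensorInfo to use instead of the model-derived one.
    TensorInfo ResolveOutputInfo(const TensorInfo& modelDerivedInfo,
                                 const TensorInfo* overrideOutputInfo = nullptr)
    {
        return overrideOutputInfo ? *overrideOutputInfo : modelDerivedInfo;
    }

    int main()
    {
        TensorInfo fromModel{"QASYMM8", 0.0078125f};
        TensorInfo qlstmOverride{"QSYMM16", 0.00006103515625f};

        // Default path: behaves exactly as before the parameter existed.
        std::cout << ResolveOutputInfo(fromModel).dataType << "\n";                 // QASYMM8

        // Override path: the caller-supplied info wins.
        std::cout << ResolveOutputInfo(fromModel, &qlstmOverride).dataType << "\n"; // QSYMM16

        return 0;
    }

Defaulting the extra argument to nullptr keeps every existing caller
source-compatible, which is presumably how the parameter could be added
without touching unrelated converters.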