From 84d63785eb2dceba297a685ebd98f1d29be47326 Mon Sep 17 00:00:00 2001 From: Mike Kelly <mike.kelly@arm.com> Date: Fri, 6 May 2022 12:14:16 +0100 Subject: IVGCVSW-6929 Fix for segfault in tflite delegate * It's possible that a model may have an input entry for bias tensors but that the index for those is -1. If it's -1 then it's not present. * Fixed logic error in IsOptionalOperandPresent: it returned false if it was present and true if it was missing. Signed-off-by: Mike Kelly <mike.kelly@arm.com> Change-Id: I45ad8d8552122493c529b1a35a5689416ccfbb71 --- delegate/src/Convolution.hpp | 6 +++--- delegate/src/DelegateUtils.hpp | 6 ++++-- delegate/src/FullyConnected.hpp | 8 ++++---- delegate/src/Lstm.hpp | 24 ++++++++++++------------ delegate/src/UnidirectionalSequenceLstm.hpp | 24 ++++++++++++------------ 5 files changed, 35 insertions(+), 33 deletions(-) diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp index f02a56fc7d..3b23d6d500 100644 --- a/delegate/src/Convolution.hpp +++ b/delegate/src/Convolution.hpp @@ -35,7 +35,7 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData, armnn::Convolution2dDescriptor descriptor; const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data); - bool biasEnabled = tfLiteNode->inputs->size > 2; + bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2); descriptor.m_BiasEnabled = biasEnabled; descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex); descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex); @@ -225,7 +225,7 @@ TfLiteStatus VisitConv3dOperator(DelegateData& delegateData, armnn::Convolution3dDescriptor descriptor; const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data); - bool biasEnabled = tfLiteNode->inputs->size == 3 ? 
true : false; + bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2); descriptor.m_BiasEnabled = biasEnabled; descriptor.m_DataLayout = armnn::DataLayout::NDHWC; descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex); @@ -382,7 +382,7 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData, } TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); - bool biasEnabled = tfLiteNode->inputs->size > 2; + bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2); armnn::DepthwiseConvolution2dDescriptor descriptor; const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data); diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp index e0ba1cf4e7..46b2db9d64 100644 --- a/delegate/src/DelegateUtils.hpp +++ b/delegate/src/DelegateUtils.hpp @@ -586,11 +586,13 @@ TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer, bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex) { - if (tfLiteNode->inputs->data[operandIndex] < 0) { + // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or + // less then the input is not present. 
+ if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0) + { return true; } return false; - } TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer, diff --git a/delegate/src/FullyConnected.hpp b/delegate/src/FullyConnected.hpp index 18db550e5c..e8c13f2053 100644 --- a/delegate/src/FullyConnected.hpp +++ b/delegate/src/FullyConnected.hpp @@ -31,7 +31,7 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData, return kTfLiteError; } TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); - bool biasEnabled = (numInputs == 3); + bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2); const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; @@ -52,9 +52,9 @@ TfLiteStatus VisitFullyConnectedOperator(DelegateData& delegateData, return kTfLiteError; } - const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); - armnn::TensorInfo weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor); - const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); + const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); + armnn::TensorInfo weightsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteWeightsTensor); + const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); // Fully Connected Layer accepts two dimensional weights input int32_t weightsDimension = static_cast<int32_t>(weightsTensorInfo.GetNumDimensions()); diff --git a/delegate/src/Lstm.hpp b/delegate/src/Lstm.hpp index 565c4817c0..b082db66b9 100644 --- a/delegate/src/Lstm.hpp +++ b/delegate/src/Lstm.hpp @@ -52,7 +52,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData, // Set the params structure for the AddLstmLayer call armnn::LstmInputParams params; - if (!IsOptionalOperandPresent(tfLiteNode, 1)) + if 
(IsOptionalOperandPresent(tfLiteNode, 1)) { params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1); } @@ -62,7 +62,7 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData, params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4); // Recurrent weight tensors of size {n_cell, n_output} - if (!IsOptionalOperandPresent(tfLiteNode, 5)) + if (IsOptionalOperandPresent(tfLiteNode, 5)) { params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5); } @@ -72,23 +72,23 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData, params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8); // Peephole weights tensors of size {n_cell}, representing a diagonal matrix. - if (!IsOptionalOperandPresent(tfLiteNode, 9)) + if (IsOptionalOperandPresent(tfLiteNode, 9)) { params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9); } - if (!IsOptionalOperandPresent(tfLiteNode, 10)) + if (IsOptionalOperandPresent(tfLiteNode, 10)) { params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10); } - if (!IsOptionalOperandPresent(tfLiteNode, 11)) + if (IsOptionalOperandPresent(tfLiteNode, 11)) { params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11); } // Gates bias tensors of size {n_cell} - if (!IsOptionalOperandPresent(tfLiteNode, 12)) + if (IsOptionalOperandPresent(tfLiteNode, 12)) { params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12); } @@ -98,12 +98,12 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData, params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15); // Projection weight tensor of size {n_output, n_cell} - if (!IsOptionalOperandPresent(tfLiteNode, 16)) + if (IsOptionalOperandPresent(tfLiteNode, 16)) { params.m_ProjectionWeights = 
GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16); } // Projection bias tensor of size {n_output} - if (!IsOptionalOperandPresent(tfLiteNode, 17)) + if (IsOptionalOperandPresent(tfLiteNode, 17)) { params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17); } @@ -113,22 +113,22 @@ TfLiteStatus VisitLstmOperator(DelegateData& delegateData, armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]); // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix. - if (tfLiteNode->inputs->size >= 21 && !IsOptionalOperandPresent(tfLiteNode, 20)) + if (IsOptionalOperandPresent(tfLiteNode, 20)) { params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20); } - if (tfLiteNode->inputs->size >= 22 && !IsOptionalOperandPresent(tfLiteNode, 21)) + if (IsOptionalOperandPresent(tfLiteNode, 21)) { params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21); } - if (tfLiteNode->inputs->size >= 23 && !IsOptionalOperandPresent(tfLiteNode, 22)) + if (IsOptionalOperandPresent(tfLiteNode, 22)) { params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22); } - if (tfLiteNode->inputs->size >= 24 && !IsOptionalOperandPresent(tfLiteNode, 23)) + if (IsOptionalOperandPresent(tfLiteNode, 23)) { params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23); } diff --git a/delegate/src/UnidirectionalSequenceLstm.hpp b/delegate/src/UnidirectionalSequenceLstm.hpp index bcf01cf2a9..1a02a0c1bc 100644 --- a/delegate/src/UnidirectionalSequenceLstm.hpp +++ b/delegate/src/UnidirectionalSequenceLstm.hpp @@ -54,7 +54,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData, // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop armnn::LstmInputParams params; - if 
(!IsOptionalOperandPresent(tfLiteNode, 1)) + if (IsOptionalOperandPresent(tfLiteNode, 1)) { params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 1); } @@ -64,7 +64,7 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData, params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 4); // Recurrent weight tensors of size {n_cell, n_output} - if (!IsOptionalOperandPresent(tfLiteNode, 5)) + if (IsOptionalOperandPresent(tfLiteNode, 5)) { params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 5); } @@ -74,23 +74,23 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData, params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 8); // Peephole weights tensors of size {n_cell}, representing a diagonal matrix. - if (!IsOptionalOperandPresent(tfLiteNode, 9)) + if (IsOptionalOperandPresent(tfLiteNode, 9)) { params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 9); } - if (!IsOptionalOperandPresent(tfLiteNode, 10)) + if (IsOptionalOperandPresent(tfLiteNode, 10)) { params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 10); } - if (!IsOptionalOperandPresent(tfLiteNode, 11)) + if (IsOptionalOperandPresent(tfLiteNode, 11)) { params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 11); } // Gates bias tensors of size {n_cell} - if (!IsOptionalOperandPresent(tfLiteNode, 12)) + if (IsOptionalOperandPresent(tfLiteNode, 12)) { params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 12); } @@ -100,12 +100,12 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData, params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 15); // Projection weight tensor of size {n_output, n_cell} - if (!IsOptionalOperandPresent(tfLiteNode, 
16)) + if (IsOptionalOperandPresent(tfLiteNode, 16)) { params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 16); } // Projection bias tensor of size {n_output} - if (!IsOptionalOperandPresent(tfLiteNode, 17)) + if (IsOptionalOperandPresent(tfLiteNode, 17)) { params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 17); } @@ -115,22 +115,22 @@ TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData, armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[19]]); // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix. - if (tfLiteNode->inputs->size >= 21 && !IsOptionalOperandPresent(tfLiteNode, 20)) + if (IsOptionalOperandPresent(tfLiteNode, 20)) { params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 20); } - if (tfLiteNode->inputs->size >= 22 && !IsOptionalOperandPresent(tfLiteNode, 21)) + if (IsOptionalOperandPresent(tfLiteNode, 21)) { params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 21); } - if (tfLiteNode->inputs->size >= 23 && !IsOptionalOperandPresent(tfLiteNode, 22)) + if (IsOptionalOperandPresent(tfLiteNode, 22)) { params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 22); } - if (tfLiteNode->inputs->size >= 24 && !IsOptionalOperandPresent(tfLiteNode, 23)) + if (IsOptionalOperandPresent(tfLiteNode, 23)) { params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteTensors, tfLiteNode, 23); } -- cgit v1.2.1