From 080d45d73c03830cb80b223fd64c546e84d8337a Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Fri, 10 Nov 2023 17:11:53 +0000
Subject: MLCE-1138 Issue with Delegate supporting FP16 models

  * Fixed issue where backends were asked to support FP16 layers that would
    be optimized out.
  * Fixed issue where backends were asked to support non-constant filter and
    bias tensors when those tensors would be replaced by constant tensors
    during optimization.

Signed-off-by: Mike Kelly
Change-Id: Ib54b9cb99d5014e27172841a665daf57d1d5b23d
---
 delegate/opaque/src/Convolution.hpp     |  51 ++++++++++++++--
 delegate/opaque/src/Quantization.hpp    |  26 ++++++---
 delegate/opaque/src/SharedFunctions.cpp | 100 ++++++++++++++++++++++++++++++++
 delegate/opaque/src/SharedFunctions.hpp |   6 ++
 4 files changed, 171 insertions(+), 12 deletions(-)

(limited to 'delegate/opaque')
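Background for the two fixes: in an FP16 TfLite model the weights are stored as FP16 constants and fed through a DEQUANTIZE operator, and ArmNN's optimizer folds that Constant + Dequantize pair into a single constant tensor. Support queries therefore have to describe tensors as they will look after optimization, not as they appear in the TfLite graph. The fragment below is an illustrative summary of the pattern the diffs add to the convolution visit functions, not part of the patch itself; filterTensorInfo, tfLiteContext and inputTensors are locals of those functions:

    // Treat the filter as constant if it already is one, or if its producer
    // is a Dequantize of a constant that optimization will fold away.
    bool filterIsConst = filterTensorInfo.IsConstant();
    if (!filterIsConst)
    {
        filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, inputTensors[1]);
    }
    // Query the backend with a copy so the real TensorInfo stays untouched.
    armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
    filterTensorInfoCopy.SetConstant(filterIsConst);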
diff --git a/delegate/opaque/src/Convolution.hpp b/delegate/opaque/src/Convolution.hpp
index 384c62b678..e4393e7bb0 100644
--- a/delegate/opaque/src/Convolution.hpp
+++ b/delegate/opaque/src/Convolution.hpp
@@ -138,6 +138,27 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
     armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
+        bool filterIsConst = filterTensorInfo.IsConstant();
+
+        if (!filterIsConst)
+        {
+            filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, inputTensors[1]);
+        }
+        armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
+        filterTensorInfoCopy.SetConstant(filterIsConst);
+        armnn::Optional<armnn::TensorInfo> optionalBiasInfoCopy(biasTensorInfo);
+
+        if (biasEnabled)
+        {
+            bool biasIsConst = biasTensorInfo.IsConstant();
+
+            if (!biasIsConst)
+            {
+                biasIsConst = WillInputBeOptimizedToConst(tfLiteContext, inputTensors[2]);
+            }
+            optionalBiasInfoCopy.value().SetConstant(biasIsConst);
+        }
+
         bool isSupported = false;
         FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONV2D",
                                           tfLiteContext,
@@ -148,8 +169,8 @@ TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                           inputTensorInfo,
                                           outputTensorInfo,
                                           descriptor,
-                                          filterTensorInfo,
-                                          optionalBiasInfo);
+                                          filterTensorInfoCopy,
+                                          optionalBiasInfoCopy);

         return isSupported ? kTfLiteOk : kTfLiteError;
     }
@@ -339,6 +360,28 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
     armnn::BackendId setBackend;
     if (!delegateData.m_Network)
     {
+        bool filterIsConst = filterTensorInfo.IsConstant();
+
+        if (!filterIsConst)
+        {
+            filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, inputTensors[1]);
+        }
+        armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
+        filterTensorInfoCopy.SetConstant(filterIsConst);
+
+        armnn::Optional<armnn::TensorInfo> optionalBiasInfoCopy(biasTensorInfo);
+
+        if (biasEnabled)
+        {
+            bool biasIsConst = biasTensorInfo.IsConstant();
+
+            if (!biasIsConst)
+            {
+                biasIsConst = WillInputBeOptimizedToConst(tfLiteContext, inputTensors[2]);
+            }
+            optionalBiasInfoCopy.value().SetConstant(biasIsConst);
+        }
+
         bool isSupported = false;
         FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                           tfLiteContext,
@@ -349,8 +392,8 @@ TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                           inputTensorInfo,
                                           outputTensorInfo,
                                           descriptor,
-                                          filterTensorInfo,
-                                          armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
+                                          filterTensorInfoCopy,
+                                          optionalBiasInfoCopy);

         return isSupported ? kTfLiteOk : kTfLiteError;
     }
diff --git a/delegate/opaque/src/Quantization.hpp b/delegate/opaque/src/Quantization.hpp
index d7f5c5c73f..e2e5f7618d 100644
--- a/delegate/opaque/src/Quantization.hpp
+++ b/delegate/opaque/src/Quantization.hpp
@@ -31,6 +31,7 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
     }

     const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+
     if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
     {
         return kTfLiteError;
@@ -63,14 +64,23 @@ TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
     armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
     {
-        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEQUANTIZE",
-                                          tfLiteContext,
-                                          IsDequantizeSupported,
-                                          delegateData.m_Backends,
-                                          isSupported,
-                                          setBackend,
-                                          inputTensorInfo,
-                                          outputTensorInfo);
+        // If this is a Dequantize with a Constant input then it will be replaced by a Constant layer containing the
+        // dequantized values during optimization, so there's no need to check whether the backend supports it.
+        if (IsConstantTensor(tfLiteInputTensor))
+        {
+            isSupported = true;
+        }
+        else
+        {
+            FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEQUANTIZE",
+                                              tfLiteContext,
+                                              IsDequantizeSupported,
+                                              delegateData.m_Backends,
+                                              isSupported,
+                                              setBackend,
+                                              inputTensorInfo,
+                                              outputTensorInfo);
+        }
     };

     if (!delegateData.m_Network)
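The early `isSupported = true` above is safe because a Dequantize of a constant never reaches the backend: the optimizer evaluates it once and substitutes a Constant layer holding the results. As a standalone sketch of that evaluation (illustrative only, not ArmNN's actual optimizer code; for FP16 models the folding is just a half-to-float widening, while affine-quantized tensors use a scale and zero point as shown):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Affine dequantization: real = scale * (quantized - zeroPoint).
    // When this runs at optimization time over a constant buffer, the float
    // result backs a Constant layer and no Dequantize layer remains to run.
    std::vector<float> DequantizeToFloat(const std::vector<uint8_t>& quantized,
                                         float scale, int32_t zeroPoint)
    {
        std::vector<float> real(quantized.size());
        for (std::size_t i = 0; i < quantized.size(); ++i)
        {
            real[i] = scale * (static_cast<int32_t>(quantized[i]) - zeroPoint);
        }
        return real;
    }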
diff --git a/delegate/opaque/src/SharedFunctions.cpp b/delegate/opaque/src/SharedFunctions.cpp
index 93eb143bd0..0a0c630697 100644
--- a/delegate/opaque/src/SharedFunctions.cpp
+++ b/delegate/opaque/src/SharedFunctions.cpp
@@ -100,5 +100,105 @@ TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
     return isSupported ? kTfLiteOk : kTfLiteError;
 }

+TfLiteOpaqueNode* GetNodeConnectedToInput(TfLiteOpaqueContext* tfLiteContext,
+                                          int32_t& connectedIndex,
+                                          int32_t inputIdx)
+{
+    TfLiteIntArray* executionPlan = nullptr;
+    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
+        return nullptr;
+    }
+
+    for (int i = 0; i < executionPlan->size; ++i)
+    {
+        connectedIndex = executionPlan->data[i];
+
+        // Get the node and registration for each node in the execution plan
+        TfLiteOpaqueNode* connectedNode = nullptr;
+        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
+        if (TfLiteOpaqueContextGetNodeAndRegistration(
+                tfLiteContext, connectedIndex, &connectedNode, &tfLiteRegistration) != kTfLiteOk)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(tfLiteContext,
+                                            "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node "
+                                            "%d.", connectedIndex);
+            continue;
+        }
+        int numOutputs = 0;
+        const int* outputTensors;
+
+        if (TfLiteOpaqueNodeOutputs(connectedNode, &outputTensors, &numOutputs) != kTfLiteOk)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                    connectedIndex);
+            continue;
+        }
+
+        for (int j = 0; j < numOutputs; ++j)
+        {
+            if (outputTensors[j] == inputIdx)
+            {
+                return connectedNode;
+            }
+        }
+    }
+    // No node found so set connectedIndex to -1
+    connectedIndex = -1;
+    return nullptr;
+}
+
+bool WillInputBeOptimizedToConst(TfLiteOpaqueContext* tfLiteContext, int32_t inputIdx)
+{
+    int32_t connectedIndex;
+    TfLiteOpaqueNode* connectedNode = GetNodeConnectedToInput(tfLiteContext, connectedIndex, inputIdx);
+
+    if (connectedNode)
+    {
+        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
+
+        if (TfLiteOpaqueContextGetNodeAndRegistration(tfLiteContext, connectedIndex, &connectedNode,
+                                                      &tfLiteRegistration) == kTfLiteOk)
+        {
+            switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
+            {
+                case kTfLiteBuiltinDequantize:
+                {
+                    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(connectedNode);
+                    if (numInputs >= 1)
+                    {
+                        const int* inputTensors;
+                        if (TfLiteOpaqueNodeInputs(connectedNode, &inputTensors, &numInputs) != kTfLiteOk)
+                        {
+                            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                                    tfLiteContext,
+                                    "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                                    connectedIndex);
+                            return false;
+                        }
+                        const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                                         inputTensors[0]);
+
+                        // If the input to the Dequantize is a Constant then both that Constant layer and the Dequantize
+                        // layer will be replaced by a single Constant layer containing the dequantized values.
+                        if (IsConstantTensor(tfLiteInputTensor))
+                        {
+                            return true;
+                        }
+                    }
+                    break;
+                }
+                default:
+                {
+                }
+            }
+        }
+    }
+    return false;
+}
+
 } // namespace armnnDelegate
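GetNodeConnectedToInput locates a tensor's producer with a linear scan of the execution plan, comparing each node's output indices against the requested tensor index, and WillInputBeOptimizedToConst reports true only when that producer is a Dequantize whose own input is constant. A hypothetical call site, with tensorIdx standing in for whichever input index a visit function is checking:

    // Find which node, if any, produces tensor tensorIdx.
    int32_t producerNodeIndex = -1;
    TfLiteOpaqueNode* producerNode =
        GetNodeConnectedToInput(tfLiteContext, producerNodeIndex, tensorIdx);
    if (producerNode == nullptr)
    {
        // No producer in the execution plan: the tensor is a graph input or
        // a constant, and producerNodeIndex has been set to -1.
    }

Each query costs a pass over the execution plan, which seems acceptable here since it only runs while support for a candidate node is being validated.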
diff --git a/delegate/opaque/src/SharedFunctions.hpp b/delegate/opaque/src/SharedFunctions.hpp
index 72fbe6a332..4698a1e989 100644
--- a/delegate/opaque/src/SharedFunctions.hpp
+++ b/delegate/opaque/src/SharedFunctions.hpp
@@ -23,5 +23,11 @@ TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
                                              const armnn::TensorInfo& outputInfo,
                                              TfLiteFusedActivation activationType);

+TfLiteOpaqueNode* GetNodeConnectedToInput(TfLiteOpaqueContext* tfLiteContext,
+                                          int32_t& connectedIndex,
+                                          int32_t inputIdx);
+
+bool WillInputBeOptimizedToConst(TfLiteOpaqueContext* tfLiteContext, int32_t inputIdx);
+
 } // namespace armnnOpaqueDelegate
--
cgit v1.2.1