From a7a12f5c3654da554ad6197beff0f0fc54681c92 Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Thu, 6 May 2021 10:05:28 +0100
Subject: IVGCVSW-5969 TfLiteDelegate: Add PACK operator Support

* Added support for PACK which is equivalent to Arm NN STACK

Signed-off-by: Matthew Sloyan
Change-Id: I9ea134d0310eeea1caba30a8b9221712e9487c75
---
 delegate/src/Pack.hpp | 109 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)
 create mode 100644 delegate/src/Pack.hpp

diff --git a/delegate/src/Pack.hpp b/delegate/src/Pack.hpp
new file mode 100644
index 0000000000..59851cd637
--- /dev/null
+++ b/delegate/src/Pack.hpp
@@ -0,0 +1,109 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitPackOperator(DelegateData& delegateData,
+                               TfLiteContext* tfLiteContext,
+                               TfLiteNode* tfLiteNode,
+                               int nodeIndex,
+                               int32_t operatorCode)
+{
+    unsigned int numInputs = tfLiteNode->inputs->size;
+    if (numInputs < 1)
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+            tfLiteContext, "TfLiteArmnnDelegate: Must have at least one input (%d != %d) in node #%d",
+            1, numInputs, nodeIndex);
+        return kTfLiteError;
+    }
+
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+
+    // Validate all inputs and get TensorInfo
+    std::vector<armnn::TensorInfo> inputTensorInfos;
+    for (unsigned int i = 0; i < numInputs; ++i)
+    {
+        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
+        if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+        {
+            return kTfLiteError;
+        }
+
+        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+        inputTensorInfos.emplace_back(inputTensorInfo);
+    }
+
+    // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
+    std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
+    std::transform(inputTensorInfos.begin(),
+                   inputTensorInfos.end(),
+                   std::back_inserter(inputConstTensorInfos),
+                   [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
+
+    // Validate output and get TensorInfo
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    armnn::StackDescriptor desc;
+    desc.m_NumInputs = static_cast<uint32_t>(numInputs);
+
+    // Get axis from TfLite parameters
+    auto* params = reinterpret_cast<TfLitePackParams*>(tfLiteNode->builtin_data);
+    desc.m_Axis = static_cast<uint32_t>(params->axis);
+
+    // Use the tensor shape of the first input as the "correct" input shape in the descriptor
+    desc.m_InputShape = inputTensorInfos[0].GetShape();
+
+    // Check if supported
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   tfLiteContext,
+                                   IsStackSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   inputConstTensorInfos,
+                                   outputTensorInfo,
+                                   desc);
+    };
+
+    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify
+    // support for the operator.
+    // If supported, VisitPackOperator will be called again to add the layer to the network as seen below.
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // The TfLite Pack operator is equivalent to the Arm NN Stack operator
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddStackLayer(desc);
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
--
cgit v1.2.1
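
For reference, the PACK-to-STACK mapping applied by this patch can also be exercised directly through the public Arm NN graph API. The standalone sketch below (not part of the patch) shows how a PACK of three [2, 4] tensors along axis 1 would be described with armnn::StackDescriptor; the function name, layer name, and Float32 data type are illustrative assumptions rather than anything defined by this change.

// Illustrative sketch only: builds the Arm NN equivalent of a TfLite PACK node
// with three [2, 4] inputs and axis = 1. 'network' is assumed to be a valid
// armnn::INetwork; the layer name and data type are placeholders.
#include <armnn/ArmNN.hpp>

armnn::IConnectableLayer* AddPackAsStack(armnn::INetwork& network)
{
    armnn::StackDescriptor desc;
    desc.m_NumInputs  = 3;                            // number of PACK inputs
    desc.m_Axis       = 1;                            // TfLitePackParams::axis
    desc.m_InputShape = armnn::TensorShape({ 2, 4 }); // shape of every input

    // Stacking inserts a new dimension of size m_NumInputs at m_Axis, so the
    // output shape here is [2, 3, 4]
    armnn::IConnectableLayer* stack = network.AddStackLayer(desc, "pack_as_stack");
    stack->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo(armnn::TensorShape({ 2, 3, 4 }), armnn::DataType::Float32));
    return stack;
}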