//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include <numeric>

namespace armnnDelegate
{

TfLiteStatus VisitCastOperator(DelegateData& delegateData,
                               TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               int nodeIndex,
                               int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("CAST",
                                   tfLiteContext,
                                   IsCastSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outInfo);
    };

    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator.
    // If supported, VisitCastOperator will be called again to add the layer to the network, as seen further below.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Add a Cast layer
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
    return Connect(layer, tfLiteNode, delegateData);
}


TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
                                     const std::vector<int32_t>& targetShape,
                                     armnn::ReshapeDescriptor& reshapeDesc)
{
    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);

    if (stretchDim != targetShape.end())
    {
        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
        {
            // Return kTfLiteError and log the error after returning
            return kTfLiteError;
        }

        auto targetNumElements = armnn::numeric_cast<unsigned int>(
            std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
                                                        outputDims.data());
    reshapeDesc.m_TargetShape = outputShape;
    return kTfLiteOk;
}
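
// Worked example for the stretch-dimension logic in CreateOutputTensorShape
// (illustrative only, not part of the upstream source): reshaping a 24-element
// input with a target shape of {2, -1, 3}. std::accumulate seeds the product
// with -1, so it computes -1 * 2 * -1 * 3 = 6: the -1 stretch entry cancels the
// seed's sign, leaving the product of the known dimensions. The stretch index
// is 1, so outputDims[1] becomes 24 / 6 = 4 and the final shape is {2, 4, 3}.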

TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;

    if (numInputs == 2)
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    }
    else
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor0, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::ReshapeDescriptor reshapeDesc;
    std::vector<int32_t> targetShape;

    TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);

    // The new shape can be defined by either a second input tensor or by a builtin option; we need to check for both.
    // Options might be set without valid data, so we also need to check that the dimensions are in a valid range.
    if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
    {
        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
        {
            targetShape.push_back(reshapeOptions->shape[i]);
        }
    }
    else if (numInputs == 2)
    {
        // Get the shape from the second input tensor
        const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
        if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        if (tfLiteShapeInputTensor.dims->size != 1)
        {
            TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                     "TfLiteArmnnDelegate: Target 'shape' input is not a 1D tensor in "
                                     "operator #%d node #%d: Falling back to TfLiteOptions.",
                                     operatorCode, nodeIndex);
        }
        else
        {
            // Get the shape data out of the input tensor
            auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
            auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
            for (auto i = 0; i < shapeTensorNumValues; ++i)
            {
                targetShape.push_back(*(shapeTensorDataPtr + i));
            }
        }
    }
    else
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "Target shape not defined in reshape parameters or input tensor. "
                                 "At least one method is required in operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Use the data to create the required tensor shape.
    if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
                                 "operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Reshape, number of elements in output shape does not match "
                                 "input operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                                   tfLiteContext,
                                   IsReshapeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo0,
                                   outInfo,
                                   reshapeDesc);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
    return Connect(layer, tfLiteNode, delegateData);
}
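
// The visit functions above are driven in two phases: the delegate first calls
// them with delegateData.m_Network == nullptr to ask the backends whether the
// operator is supported, and, if that succeeds, calls them again with a live
// network so the layer is actually added. A minimal sketch of that calling
// pattern (the 'delegateData', 'network', 'context' and 'node' variables here
// are hypothetical, for illustration only):
//
//     DelegateData delegateData(backends);  // m_Network starts out as nullptr
//     if (VisitReshapeOperator(delegateData, context, node, nodeIndex,
//                              kTfLiteBuiltinReshape) == kTfLiteOk)
//     {
//         delegateData.m_Network = network.get();  // second pass adds the layer
//         VisitReshapeOperator(delegateData, context, node, nodeIndex,
//                              kTfLiteBuiltinReshape);
//     }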

TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    // Squeeze is not implemented by the delegate yet; returning kTfLiteError
    // reports the operator as unsupported so it stays on the default runtime.
    armnn::IgnoreUnused(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
    return kTfLiteError;
}

TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t operatorCode)
{
    // ExpandDims is likewise not implemented yet.
    armnn::IgnoreUnused(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
    return kTfLiteError;
}

} // namespace armnnDelegate
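
// A minimal sketch of how these visitors are typically dispatched, assuming a
// switch on the TfLite builtin operator code in the delegate's node-visiting
// logic (variable names are illustrative, not the exact upstream signature):
//
//     switch (tfLiteRegistration->builtin_code)
//     {
//         case kTfLiteBuiltinCast:
//             return VisitCastOperator(delegateData, tfLiteContext, tfLiteNode,
//                                      nodeIndex, kTfLiteBuiltinCast);
//         case kTfLiteBuiltinReshape:
//             return VisitReshapeOperator(delegateData, tfLiteContext, tfLiteNode,
//                                         nodeIndex, kTfLiteBuiltinReshape);
//         default:
//             return kTfLiteError;
//     }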