//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <numeric>

namespace armnnDelegate
{

TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
                                     const std::vector<int32_t>& targetShape,
                                     armnn::ReshapeDescriptor& reshapeDesc)
{
    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);

    if (stretchDim != targetShape.end())
    {
        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
        {
            // Return kTfLiteError and log the error after returning
            return kTfLiteError;
        }

        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<unsigned int>(std::distance(targetShape.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
                                                        outputDims.data());

    reshapeDesc.m_TargetShape = outputShape;
    return kTfLiteOk;
}

TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;

    if (numInputs == 2)
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    }
    else
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::ReshapeDescriptor reshapeDesc;

    // The new shape can be defined by either a second input tensor or by a builtin option; we need to check for both.
    TfLiteReshapeParams* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
    std::vector<int32_t> targetShape;

    bool targetShapeFound = false;
    if (reshapeOptions != nullptr)
    {
        // Options might be set without valid data. We need to check the dimensions are in a valid range.
        if (reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
        {
            uint64_t elementCounter = 1;
            for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
            {
                targetShape.push_back(reshapeOptions->shape[i]);
                if (reshapeOptions->shape[i] > 0)
                {
                    elementCounter = elementCounter * reshapeOptions->shape[i];
                }
            }
            // Check that the number of elements matches; otherwise fall back to using the second input tensor.
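            // Worked example (illustrative numbers, assumed rather than taken from the original
            // source): for an input holding 24 elements and a builtin shape of {2, -1, 4}, only
            // the positive dimensions are multiplied, so elementCounter == 8 <= 24 and the builtin
            // shape is accepted; the -1 is then resolved to 24 / 8 = 3 in CreateOutputTensorShape().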
            if (elementCounter <= inputTensorInfo0.GetNumElements())
            {
                targetShapeFound = true;
            }
        }
    }

    if (!targetShapeFound)
    {
        if (numInputs == 2)
        {
            const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
            if (IsDynamicTensor(tfLiteShapeInputTensor))
            {
                TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                         "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
                                         "operator #%d node #%d: ",
                                         operatorCode, nodeIndex);
                return kTfLiteError;
            }

            if (tfLiteShapeInputTensor.dims->size != 1)
            {
                TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                         "TfLiteArmnnDelegate: Target 'shape' input is not a 1D tensor in "
                                         "operator #%d node #%d: ",
                                         operatorCode, nodeIndex);
                return kTfLiteError;
            }

            // Get the shape data out of the input tensor
            auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
            auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
            for (auto i = 0; i < shapeTensorNumValues; ++i)
            {
                targetShape.push_back(*(shapeTensorDataPtr + i));
            }
        }
        else
        {
            TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                     "Target shape not defined in reshape parameters or input tensor. "
                                     "At least one method required in operator #%d node #%d: ",
                                     operatorCode, nodeIndex);
            return kTfLiteError;
        }
    }

    // Use the data to create the required tensor shape.
    if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
                                 "operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Reshape, number of elements in output shape does not "
                                 "match input operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsReshapeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo0,
                                   outInfo,
                                   reshapeDesc);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
    return Connect(layer, tfLiteNode, delegateData);
}

TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    armnn::IgnoreUnused(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
    return kTfLiteError;
}

TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t operatorCode)
{
    armnn::IgnoreUnused(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
    return kTfLiteError;
}

} // namespace armnnDelegate
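
// A minimal caller sketch (an assumption for illustration, not part of this header: the real
// dispatch lives in the delegate's node visitor, and the switch shown here is a hypothetical
// fragment of it). It shows how a builtin operator code would be routed to the Visit* function
// defined above:
//
//     case kTfLiteBuiltinReshape:
//         return armnnDelegate::VisitReshapeOperator(delegateData,
//                                                    tfLiteContext,
//                                                    tfLiteNode,
//                                                    nodeIndex,
//                                                    kTfLiteBuiltinReshape);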