diff options
Diffstat (limited to 'delegate/classic/src/Redefine.hpp')
-rw-r--r-- | delegate/classic/src/Redefine.hpp | 187 |
1 file changed, 173 insertions, 14 deletions
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp index 41c62c33c8..2c29083719 100644 --- a/delegate/classic/src/Redefine.hpp +++ b/delegate/classic/src/Redefine.hpp @@ -5,8 +5,6 @@ #pragma once -#include <armnn/utility/IgnoreUnused.hpp> - #include <ClassicDelegateUtils.hpp> #include <tensorflow/lite/builtin_ops.h> @@ -231,13 +229,83 @@ TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { - armnn::IgnoreUnused(delegateData, - tfLiteContext, - tfLiteNode, - nodeIndex, - operatorCode); + TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); + TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); + + const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; + const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex)) + { + return kTfLiteError; + } + + auto* options = reinterpret_cast<TfLiteSqueezeParams*>(tfLiteNode->builtin_data); + + const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); + + std::vector<uint32_t> squeezeDim; + // A single negative dim index is interpreted as a negative index in python + // Meaning the index will be the shape size plus the negative index value + if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0) + { + int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0]; + squeezeDim.push_back(static_cast<uint32_t>(dim)); + } + else + { + for (int32_t i = 0; i < options->num_squeeze_dims; ++i) + { + squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i])); + } + } + + armnn::TensorInfo 
outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo); + + armnn::ReshapeDescriptor reshapeDesc; + reshapeDesc.m_TargetShape = outputTensorInfo.GetShape(); + + bool isSupported = false; + armnn::BackendId setBackend; + auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC("SQUEEZE", + tfLiteContext, + IsReshapeSupported, + delegateData.m_Backends, + isSupported, + setBackend, + inputTensorInfo, + outInfo, + reshapeDesc); + }; + + if (!delegateData.m_Network) + { + validateFunc(outputTensorInfo, isSupported); + return isSupported ? kTfLiteOk : kTfLiteError; + } + + armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc); + layer->SetBackendId(setBackend); + ARMNN_ASSERT(layer != nullptr); + + armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0); + outputSlot.SetTensorInfo(outputTensorInfo); - return kTfLiteError; + // try to connect the Constant Inputs if there are any + if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk) + { + return kTfLiteError; + } + + // Connect + return Connect(layer, tfLiteNode, delegateData); } TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData, @@ -246,13 +314,104 @@ TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData, int nodeIndex, int32_t operatorCode) { - armnn::IgnoreUnused(delegateData, - tfLiteContext, - tfLiteNode, - nodeIndex, - operatorCode); + TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex)); + TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); - return kTfLiteError; + const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; + const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]]; + if 
(!IsValid(tfLiteContext, tfLiteAxisTensor, operatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; + if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex)) + { + return kTfLiteError; + } + + const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); + armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); + + auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor); + int32_t axis = axisTensorData[0]; + + int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()); + if (axis > inputDimSize || axis < 0 - (inputDimSize + 1)) + { + TF_LITE_MAYBE_KERNEL_LOG( + tfLiteContext, + "TfLiteArmnnOpaqueDelegate: Axis must be in range " + "[0 - (inputDimSize + 1), inputDimSize] inclusive."); + return kTfLiteError; + } + + if(axis < 0) + { + axis = inputDimSize + axis + 1; + } + + std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1); + unsigned int inputShapeIndex = 0; + for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i) + { + if (i == static_cast<unsigned int>(axis)) + { + shape[i] = 1; + } + else + { + shape[i] = inputTensorInfo.GetShape()[inputShapeIndex]; + ++inputShapeIndex; + } + } + + armnn::ReshapeDescriptor reshapeDesc; + reshapeDesc.m_TargetShape = armnn::TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data()); + + bool isSupported = false; + armnn::BackendId setBackend; + auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) + { + FORWARD_LAYER_SUPPORT_FUNC("EXPAND_DIMS", + tfLiteContext, + IsReshapeSupported, + delegateData.m_Backends, + isSupported, + setBackend, + inputTensorInfo, + outInfo, + reshapeDesc); + }; + + if (!delegateData.m_Network) + { + validateFunc(outputTensorInfo, isSupported); + return isSupported ? 
kTfLiteOk : kTfLiteError; + } + + armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc); + layer->SetBackendId(setBackend); + ARMNN_ASSERT(layer != nullptr); + + armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0); + outputTensorInfo.SetShape(reshapeDesc.m_TargetShape); + outputSlot.SetTensorInfo(outputTensorInfo); + + // try to connect the Constant Inputs if there are any + if(ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk) + { + return kTfLiteError; + } + + // Connect + return Connect(layer, tfLiteNode, delegateData); } } // namespace armnnDelegate |