From c49aacc83370e89435129650a30ef1b384712dfe Mon Sep 17 00:00:00 2001
From: Matthew Sloyan
Date: Fri, 28 Apr 2023 17:27:26 +0100
Subject: IVGCVSW-7603 Implement Reshape operators for Opaque Delegate

 * Moved CreateOutputTensorShape function to common DelegateUtils.hpp

Signed-off-by: Matthew Sloyan
Change-Id: I3d8a9834ecd6b7cda170cce958677a0dde62824a
---
 delegate/CMakeLists.txt                |   1 +
 delegate/classic/src/Redefine.hpp      |  31 ------
 delegate/common/src/DelegateUtils.hpp  |  31 ++++++
 delegate/opaque/src/Redefine.hpp       | 194 ++++++++++++++++++++++++++++++---
 delegate/opaque/src/armnn_delegate.cpp |   6 +
 5 files changed, 216 insertions(+), 47 deletions(-)

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index bea5566193..003dffa807 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -304,6 +304,7 @@ if(BUILD_UNIT_TESTS)
         test/PreluTestHelper.hpp
         test/ReduceTest.cpp
         test/ReduceTestHelper.hpp
+        test/ReshapeTest.cpp
         test/ResizeTest.cpp
         test/ResizeTestHelper.hpp
         test/RoundTest.cpp
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 7aef74f76b..41c62c33c8 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-#include
 
 namespace armnnDelegate
 {
@@ -84,36 +83,6 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
     return Connect(layer, tfLiteNode, delegateData);
 }
 
-
-TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
-                                     const std::vector<int32_t>& targetShape,
-                                     armnn::ReshapeDescriptor& reshapeDesc)
-{
-    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
-    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
-
-    if (stretchDim != targetShape.end())
-    {
-        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
-        {
-            // Return kTfLiteError and log the error after returning
-            return kTfLiteError;
-        }
-
-        auto targetNumElements =
-            armnn::numeric_cast<unsigned int>(
-                std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
-
-        auto stretchIndex = static_cast<unsigned int>(std::distance(targetShape.begin(), stretchDim));
-        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
-    }
-
-    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
-                                                        outputDims.data());
-    reshapeDesc.m_TargetShape = outputShape;
-    return kTfLiteOk;
-}
-
 TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 51c70f9ba1..37fe9b5b84 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -21,6 +21,8 @@
 #include
 #include
 
+#include
+
 namespace
 {
 
@@ -138,4 +140,33 @@ void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
     }
 }
 
+TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
+                                     const std::vector<int32_t>& targetShape,
+                                     armnn::ReshapeDescriptor& reshapeDesc)
+{
+    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
+    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
+
+    if (stretchDim != targetShape.end())
+    {
+        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
+        {
+            // Return kTfLiteError and log the error after returning
+            return kTfLiteError;
+        }
+
+        auto targetNumElements =
+            armnn::numeric_cast<unsigned int>(
+                std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
+
+        auto stretchIndex = static_cast<unsigned int>(std::distance(targetShape.begin(), stretchDim));
+        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+    }
+
+    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
+                                                        outputDims.data());
+    reshapeDesc.m_TargetShape = outputShape;
+    return kTfLiteOk;
+}
+
 } // namespace anonymous
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index 7dd8561de4..dc424cff00 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -4,15 +4,7 @@
 //
 #pragma once
 
-#include
-
-#include "OpaqueDelegateUtils.hpp"
-
-#include
-#include
-#include
-#include
-#include
+#include
 
 namespace armnnOpaqueDelegate
 {
@@ -62,13 +54,13 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
     armnn::BackendId setBackend;
     auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
     {
         FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CAST",
-                                    tfLiteContext,
-                                    IsCastSupported,
-                                    delegateData.m_Backends,
-                                    isSupported,
-                                    setBackend,
-                                    inputTensorInfo,
-                                    outInfo);
+                                          tfLiteContext,
+                                          IsCastSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo);
     };
     // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
@@ -97,4 +89,174 @@ TfLiteStatus VisitCastOperator(DelegateData& delegateData,
     // Connect
     return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
 }
+
+TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
+                                  TfLiteOpaqueContext* tfLiteContext,
+                                  TfLiteOpaqueNode* tfLiteNode,
+                                  int nodeIndex,
+                                  int32_t operatorCode)
+{
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+
+    if (numInputs == 2)
+    {
+        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    }
+    else
+    {
+        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    }
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    std::vector<int32_t> targetShape;
+
+    auto* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+    // The new shape can be defined by either a second input tensor or by a builtin option; we need to check for both.
+    // Options might be set without valid data, so we need to check that the dimensions are in a valid range.
+    if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
+    {
+        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
+        {
+            targetShape.push_back(reshapeOptions->shape[i]);
+        }
+    }
+    else if (numInputs == 2)
+    {
+        // Get shape from the second input tensor
+        const TfLiteOpaqueTensor* tfLiteShapeInputTensor =
+                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+        if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
+        {
+            return kTfLiteError;
+        }
+
+        int32_t numDims = TfLiteOpaqueTensorNumDims(tfLiteShapeInputTensor);
+        if (numDims != 1)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Target 'shape' input is not a 1D tensor in "
+                    "operator #%d node #%d: Falling back to TfLiteOptions.",
+                    operatorCode, nodeIndex);
+        }
+        else
+        {
+            // Get the shape data out of the input tensor
+            auto* shapeTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeInputTensor));
+            int32_t shapeTensorNumValues = TfLiteOpaqueTensorDim(tfLiteShapeInputTensor, 0);
+            for (int32_t i = 0; i < shapeTensorNumValues; ++i)
+            {
+                targetShape.push_back(shapeTensorDataPtr[i]);
+            }
+        }
+    }
+    else
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Target shape not defined in reshape parameters or input tensor. "
+                "At least one method required in operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Use the data to create the required tensor shape.
+    if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: At most one component of shape can be -1 in: "
+                "operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Reshape, number of elements in output shape does not match input "
+                "operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo0,
+                                          outInfo,
+                                          reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    layer->SetBackendId(setBackend);
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
 }
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index c96f75dcb3..2fd8142169 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -1002,6 +1002,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinRelu6);
+        case kTfLiteBuiltinReshape:
+            return VisitReshapeOperator(delegateData,
+                                        tfLiteContext,
+                                        tfLiteNode,
+                                        nodeIndex,
+                                        kTfLiteBuiltinReshape);
         case kTfLiteBuiltinResizeNearestNeighbor:
             return VisitResizeOperator(delegateData,
                                        tfLiteContext,
--
cgit v1.2.1
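
Appendix (not part of the patch): a standalone sketch of the target-shape resolution that CreateOutputTensorShape performs, i.e. at most one dimension of the Reshape target shape may be -1 and that dimension is inferred from the input element count. This is plain C++17 with no ArmNN or TfLite dependencies; the function and variable names here are illustrative only, not ArmNN APIs.

#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <optional>
#include <vector>

// Resolve a TfLite-style target shape against a known input element count.
// Returns std::nullopt if more than one dimension is -1 (the kTfLiteError path in the patch).
std::optional<std::vector<unsigned int>> ResolveTargetShape(unsigned int inputNumElements,
                                                            const std::vector<int32_t>& targetShape)
{
    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);

    if (stretchDim != targetShape.end())
    {
        // A second -1 makes the shape ambiguous.
        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
        {
            return std::nullopt;
        }

        // Product of the explicit dimensions: the -1 initial value cancels the single -1 entry.
        auto targetNumElements = static_cast<unsigned int>(
            std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
        outputDims[stretchIndex] = inputNumElements / targetNumElements;
    }
    return outputDims;
}

int main()
{
    // A 24-element input reshaped with target {-1, 4} resolves to {6, 4}.
    auto dims = ResolveTargetShape(24, {-1, 4});
    for (unsigned int d : *dims)
    {
        std::cout << d << ' ';
    }
    std::cout << '\n';
    return 0;
}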