From 81b66f3aeea1d0e788b0ce2894a58fedc763470b Mon Sep 17 00:00:00 2001
From: Kevin May
Date: Wed, 26 Apr 2023 14:55:36 +0100
Subject: IVGCVSW-7577, IVGCVSW-7578 Implement BatchToSpaceNd and SpaceToBatchNd in Opaque Delegate

Signed-off-by: Kevin May
Change-Id: I38304abce1a417bb69aced2a5b38e976ea0cbbc0
---
 delegate/CMakeLists.txt                |   2 +
 delegate/opaque/CMakeLists.txt         |   1 +
 delegate/opaque/src/BatchSpace.hpp     | 260 +++++++++++++++++++++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp |  12 ++
 4 files changed, 275 insertions(+)

diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index ab61337dce..7dc89d79cf 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -259,6 +259,8 @@ if(BUILD_UNIT_TESTS)
         common/src/test/DelegateTestInterpreterUtils.hpp
         opaque/src/test/ArmnnOpaqueDelegateTest.cpp
         opaque/src/test/DelegateTestInterpreter.cpp
+        test/BatchSpaceTest.cpp
+        test/BatchSpaceTestHelper.hpp
         test/CastTest.cpp
         test/CastTestHelper.hpp
         test/ComparisonTest.cpp
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 39df124310..958dcf6014 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -8,6 +8,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
         include/armnn_delegate.hpp
         include/Version.hpp
         src/armnn_delegate.cpp
+        src/BatchSpace.hpp
         src/Convolution.hpp
         src/Redefine.hpp
         src/SharedFunctions.cpp
diff --git a/delegate/opaque/src/BatchSpace.hpp b/delegate/opaque/src/BatchSpace.hpp
index e16969768e..c760a14f5e 100644
--- a/delegate/opaque/src/BatchSpace.hpp
+++ b/delegate/opaque/src/BatchSpace.hpp
@@ -2,3 +2,263 @@
 // Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#include <OpaqueDelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
+                                         TfLiteOpaqueContext* tfLiteContext,
+                                         TfLiteOpaqueNode* tfLiteNode,
+                                         int nodeIndex,
+                                         int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    int numInputs = 3;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLiteBlockShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                          inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLiteCropsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+    if (!IsValid(tfLiteContext, tfLiteCropsTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLiteOutputTensor =
+            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBlockShapeTensor);
+    const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteCropsTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // Copy memory into block and crops
+    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+    ::memcpy(blockShape.data(), TfLiteOpaqueTensorData(tfLiteBlockShapeTensor), blockShapeTensorInfo.GetNumBytes());
+
+    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
+    std::memcpy(cropsVector.data(), TfLiteOpaqueTensorData(tfLiteCropsTensor), cropsTensorInfo.GetNumBytes());
+
+    size_t step = 2;
+    std::vector<std::pair<unsigned int, unsigned int>> crops;
+    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
+    {
+        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
+    }
+
+    // Make a descriptor
+    armnn::BatchToSpaceNdDescriptor descriptor;
+    descriptor.m_BlockShape = blockShape;
+    descriptor.m_Crops = crops;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Check if supported
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
+                                          tfLiteContext,
+                                          IsBatchToSpaceNdSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          descriptor);
+    };
+
+    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
+    // support for the operator
+    // If supported, VisitBatchToSpaceNdOperator will be called again to add the layer to the network as seen below
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ?
+            kTfLiteOk : kTfLiteError;
+    }
+
+    // Add a BatchToSpace layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+    layer->SetBackendId(setBackend);
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
+                                         TfLiteOpaqueContext* tfLiteContext,
+                                         TfLiteOpaqueNode* tfLiteNode,
+                                         int nodeIndex,
+                                         int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    int numInputs = 3;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLiteBlockShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                          inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLitePadListTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                       inputTensors[2]);
+    if (!IsValid(tfLiteContext, tfLitePadListTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+                                                                                      outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBlockShapeTensor);
+    const armnn::TensorInfo& padListTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePadListTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+    std::memcpy(blockShape.data(),
+                TfLiteOpaqueTensorData(tfLiteBlockShapeTensor),
+                blockShapeTensorInfo.GetNumBytes());
+
+    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+    std::memcpy(padListVector.data(),
+                TfLiteOpaqueTensorData(tfLitePadListTensor),
+                padListTensorInfo.GetNumBytes());
+
+    size_t step = 2;
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+    {
+        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+    }
+
+    armnn::SpaceToBatchNdDescriptor descriptor;
+    descriptor.m_BlockShape = blockShape;
+    descriptor.m_PadList = padList;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Check if supported
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
+                                          tfLiteContext,
+                                          IsSpaceToBatchNdSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outputTensorInfo,
+                                          descriptor);
+    };
+
+    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
+    // support for the operator
+    // If supported, VisitSpaceToBatchNdOperator will be called again to add the layer to the network as seen below
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    // Add a SpaceToBatch layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    layer->SetBackendId(setBackend);
+    ARMNN_ASSERT(layer != nullptr);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace
\ No newline at end of file
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index c305c4020c..7f3d8cf9e9 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -622,6 +622,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
 {
     switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
     {
+        case kTfLiteBuiltinBatchToSpaceNd:
+            return VisitBatchToSpaceNdOperator(delegateData,
+                                               tfLiteContext,
+                                               tfLiteNode,
+                                               nodeIndex,
+                                               kTfLiteBuiltinBatchToSpaceNd);
         case kTfLiteBuiltinCast:
             return VisitCastOperator(delegateData,
                                      tfLiteContext,
@@ -688,6 +694,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinNotEqual);
+        case kTfLiteBuiltinSpaceToBatchNd:
+            return VisitSpaceToBatchNdOperator(delegateData,
+                                               tfLiteContext,
+                                               tfLiteNode,
+                                               nodeIndex,
+                                               kTfLiteBuiltinSpaceToBatchNd);
         default:
             return kTfLiteError;
     }
--
cgit v1.2.1
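
For reference (not part of the patch): both visit functions above unpack a flat [spatialDims, 2] int32 tensor supplied by TfLite into the (begin, end) pair list that armnn::BatchToSpaceNdDescriptor::m_Crops and armnn::SpaceToBatchNdDescriptor::m_PadList expect. The standalone C++ sketch below shows just that pairing step in isolation; MakePairList and the sample values are illustrative only and do not appear in the patch.

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Convert a flat [spatialDims, 2] crops/padding buffer into the
    // (begin, end) pair list used by the ArmNN batch/space descriptors.
    std::vector<std::pair<unsigned int, unsigned int>>
    MakePairList(const std::vector<int32_t>& flat)
    {
        const size_t step = 2; // two entries (begin, end) per spatial dimension
        std::vector<std::pair<unsigned int, unsigned int>> pairs;
        for (size_t i = 0; i < flat.size() / step; ++i)
        {
            pairs.emplace_back(static_cast<unsigned int>(flat[i * step]),
                               static_cast<unsigned int>(flat[i * step + 1]));
        }
        return pairs;
    }

    int main()
    {
        // Example: two spatial dimensions with no cropping, as a TfLite model
        // would supply them in a flat int32 tensor of shape [2, 2].
        const std::vector<int32_t> crops = { 0, 0, 0, 0 };
        for (const auto& p : MakePairList(crops))
        {
            std::cout << "(" << p.first << ", " << p.second << ")\n";
        }
        return 0;
    }

Compiled on its own (e.g. g++ -std=c++14), this prints "(0, 0)" twice, i.e. one begin/end pair per spatial dimension, which is the shape the descriptors in the patch consume.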