author    Kevin May <kevin.may@arm.com>  2023-04-26 14:55:36 +0100
committer Kevin May <kevin.may@arm.com>  2023-04-26 16:24:25 +0000
commit    81b66f3aeea1d0e788b0ce2894a58fedc763470b (patch)
tree      b27461cfb84b38b52bc58e63fdd83d472aac3af2
parent    024ef0b460c802a7c841dcba4b7e894e714d4512 (diff)
download  armnn-81b66f3aeea1d0e788b0ce2894a58fedc763470b.tar.gz
IVGCVSW-7577, IVGCVSW-7578 Implement BatchToSpaceNd and SpaceToBatchNd in Opaque Delegate
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I38304abce1a417bb69aced2a5b38e976ea0cbbc0
 delegate/CMakeLists.txt                |   2 +
 delegate/opaque/CMakeLists.txt         |   1 +
 delegate/opaque/src/BatchSpace.hpp     | 260 ++++++++++++++++++++
 delegate/opaque/src/armnn_delegate.cpp |  12 +
 4 files changed, 275 insertions(+), 0 deletions(-)
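
Both operators share the same NHWC shape arithmetic: BatchToSpaceNd divides the batch dimension by the product of the block shape and grows the spatial dimensions (minus the crops), while SpaceToBatchNd is the inverse (pad the spatial dimensions, then fold them into the batch). A minimal standalone sketch of that arithmetic, with made-up example values and not part of this commit:

#include <cstdio>
#include <utility>
#include <vector>

int main()
{
    // Example values only: a [4, 1, 1, 1] NHWC input, 2x2 block, no cropping.
    std::vector<unsigned int> inputShape = { 4, 1, 1, 1 };
    std::vector<unsigned int> blockShape = { 2, 2 };
    std::vector<std::pair<unsigned int, unsigned int>> crops = { { 0, 0 }, { 0, 0 } };

    unsigned int blockProduct = blockShape[0] * blockShape[1];

    // BatchToSpaceNd: batch shrinks by the block product, spatial dims grow, crops trim.
    unsigned int outN = inputShape[0] / blockProduct;
    unsigned int outH = inputShape[1] * blockShape[0] - crops[0].first - crops[0].second;
    unsigned int outW = inputShape[2] * blockShape[1] - crops[1].first - crops[1].second;
    unsigned int outC = inputShape[3];

    // Prints [1, 2, 2, 1]; SpaceToBatchNd maps that shape back to [4, 1, 1, 1].
    std::printf("BatchToSpaceNd output shape: [%u, %u, %u, %u]\n", outN, outH, outW, outC);
    return 0;
}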
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index ab61337dce..7dc89d79cf 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -259,6 +259,8 @@ if(BUILD_UNIT_TESTS)
common/src/test/DelegateTestInterpreterUtils.hpp
opaque/src/test/ArmnnOpaqueDelegateTest.cpp
opaque/src/test/DelegateTestInterpreter.cpp
+ test/BatchSpaceTest.cpp
+ test/BatchSpaceTestHelper.hpp
test/CastTest.cpp
test/CastTestHelper.hpp
test/ComparisonTest.cpp
diff --git a/delegate/opaque/CMakeLists.txt b/delegate/opaque/CMakeLists.txt
index 39df124310..958dcf6014 100644
--- a/delegate/opaque/CMakeLists.txt
+++ b/delegate/opaque/CMakeLists.txt
@@ -8,6 +8,7 @@ list(APPEND armnnOpaqueDelegateObject_sources
include/armnn_delegate.hpp
include/Version.hpp
src/armnn_delegate.cpp
+ src/BatchSpace.hpp
src/Convolution.hpp
src/Redefine.hpp
src/SharedFunctions.cpp
diff --git a/delegate/opaque/src/BatchSpace.hpp b/delegate/opaque/src/BatchSpace.hpp
index e16969768e..c760a14f5e 100644
--- a/delegate/opaque/src/BatchSpace.hpp
+++ b/delegate/opaque/src/BatchSpace.hpp
@@ -2,3 +2,263 @@
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+
+#include <OpaqueDelegateUtils.hpp>
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+namespace armnnOpaqueDelegate
+{
+
+TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ int numInputs = 3;
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteBlockShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+ inputTensors[1]);
+ if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteCropsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
+ if (!IsValid(tfLiteContext, tfLiteCropsTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+ outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBlockShapeTensor);
+ const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteCropsTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    // Copy the block shape and crops out of the TfLite tensors
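+    // (TfLite stores block_shape and crops as int32 tensors, so the raw byte copy
+    //  into vectors of 32-bit unsigned int below is size-correct)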
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+    std::memcpy(blockShape.data(), TfLiteOpaqueTensorData(tfLiteBlockShapeTensor), blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
+ std::memcpy(cropsVector.data(), TfLiteOpaqueTensorData(tfLiteCropsTensor), cropsTensorInfo.GetNumBytes());
+
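+    // The crops tensor is shaped [M, 2]: one (begin, end) pair per spatial
+    // dimension; the loop below rebuilds those pairs from the flat buffer.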
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> crops;
+ for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
+ {
+ crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
+ }
+
+ // Make a descriptor
+ armnn::BatchToSpaceNdDescriptor descriptor;
+ descriptor.m_BlockShape = blockShape;
+ descriptor.m_Crops = crops;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Check whether any of the configured backends supports the layer
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
+ tfLiteContext,
+ IsBatchToSpaceNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+    // If m_Network is a nullptr, TfLite is still validating the graph and only needs
+    // to know whether this operator is supported.
+    // If it is supported, VisitBatchToSpaceNdOperator will be called again to add the
+    // layer to the network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    // Add a BatchToSpaceNd layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
+ TfLiteOpaqueContext* tfLiteContext,
+ TfLiteOpaqueNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ int numInputs = 3;
+ const int* inputTensors;
+ if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+ int numOutputs = 0;
+ const int* outputTensors;
+ if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+ {
+ TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+ tfLiteContext,
+ "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+ nodeIndex);
+ return kTfLiteError;
+ }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteBlockShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+ inputTensors[1]);
+ if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLitePadListTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+ inputTensors[2]);
+ if (!IsValid(tfLiteContext, tfLitePadListTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+ const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
+ outputTensors[0]);
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBlockShapeTensor);
+ const armnn::TensorInfo& padListTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePadListTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
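+    // Copy the block shape and paddings out of the TfLite tensors (both int32 on the TfLite side)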
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+ std::memcpy(blockShape.data(),
+ TfLiteOpaqueTensorData(tfLiteBlockShapeTensor),
+ blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+ std::memcpy(padListVector.data(),
+ TfLiteOpaqueTensorData(tfLitePadListTensor),
+ padListTensorInfo.GetNumBytes());
+
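+    // The paddings tensor is shaped [M, 2]: one (before, after) pair per spatial dimension.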
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+ {
+ padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+ }
+
+ armnn::SpaceToBatchNdDescriptor descriptor;
+ descriptor.m_BlockShape = blockShape;
+ descriptor.m_PadList = padList;
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Check whether any of the configured backends supports the layer
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
+ tfLiteContext,
+ IsSpaceToBatchNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+    // If m_Network is a nullptr, TfLite is still validating the graph and only needs
+    // to know whether this operator is supported.
+    // If it is supported, VisitSpaceToBatchNdOperator will be called again to add the
+    // layer to the network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+    // Add a SpaceToBatchNd layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the constant inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+} // namespace armnnOpaqueDelegate
\ No newline at end of file
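
Both visit functions above follow the delegate's two-phase contract with TfLite: the first call arrives with delegateData.m_Network set to nullptr and only answers whether the node is supported; after TfLite partitions the graph, the same function runs again to add the layer. A condensed, illustrative stand-in for that control flow (simplified types, not ArmNN source):

struct Network { /* stands in for armnn::INetwork */ };

struct DelegateData
{
    Network* m_Network = nullptr; // nullptr during the "is this supported?" query
};

enum Status { Ok, Error };

Status VisitOperator(DelegateData& data, bool backendSupportsOp)
{
    if (data.m_Network == nullptr)
    {
        // Phase 1: TfLite asks whether the delegate can take this node.
        return backendSupportsOp ? Ok : Error;
    }
    // Phase 2: the graph is partitioned; actually add the layer here
    // (AddBatchToSpaceNdLayer / AddSpaceToBatchNdLayer in the real code).
    return Ok;
}

int main()
{
    DelegateData data;                                  // phase 1: no network yet
    Status canDelegate = VisitOperator(data, true);

    Network net;
    data.m_Network = &net;                              // phase 2: build into the network
    Status built = VisitOperator(data, true);
    return (canDelegate == Ok && built == Ok) ? 0 : 1;
}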
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index c305c4020c..7f3d8cf9e9 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -622,6 +622,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
{
switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
{
+ case kTfLiteBuiltinBatchToSpaceNd:
+ return VisitBatchToSpaceNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinBatchToSpaceNd);
case kTfLiteBuiltinCast:
return VisitCastOperator(delegateData,
tfLiteContext,
@@ -688,6 +694,12 @@ TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
tfLiteNode,
nodeIndex,
kTfLiteBuiltinNotEqual);
+ case kTfLiteBuiltinSpaceToBatchNd:
+ return VisitSpaceToBatchNdOperator(delegateData,
+ tfLiteContext,
+ tfLiteNode,
+ nodeIndex,
+ kTfLiteBuiltinSpaceToBatchNd);
default:
return kTfLiteError;
}
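
For reference, the data movement that BATCH_TO_SPACE_ND performs, and that whichever backend passes IsBatchToSpaceNdSupported must reproduce, can be written as a small loop nest. The following is an illustrative NHWC reference in plain C++ with example values, not ArmNN code:

#include <cstdio>
#include <vector>

int main()
{
    // Example shapes only: [4, 1, 1, 1] input, 2x2 block, zero crops.
    const unsigned int inN = 4, inH = 1, inW = 1, C = 1;
    const unsigned int bH = 2, bW = 2;                  // block shape
    const unsigned int cropT = 0, cropL = 0;            // top/left crops (bottom/right are zero here)
    const unsigned int outN = inN / (bH * bW);
    const unsigned int outH = inH * bH - cropT;         // bottom crop would also be subtracted
    const unsigned int outW = inW * bW - cropL;         // right crop would also be subtracted

    std::vector<float> in = { 1.f, 2.f, 3.f, 4.f };     // one value per input batch
    std::vector<float> out(outN * outH * outW * C);

    // Each output element (n, h, w, c) gathers from the input batch holding its block offset.
    for (unsigned int n = 0; n < outN; ++n)
    {
        for (unsigned int h = 0; h < outH; ++h)
        {
            for (unsigned int w = 0; w < outW; ++w)
            {
                for (unsigned int c = 0; c < C; ++c)
                {
                    const unsigned int hFull = h + cropT;   // undo the crop
                    const unsigned int wFull = w + cropL;
                    const unsigned int nIn = (hFull % bH * bW + wFull % bW) * outN + n;
                    const unsigned int hIn = hFull / bH;
                    const unsigned int wIn = wFull / bW;
                    out[((n * outH + h) * outW + w) * C + c] =
                        in[((nIn * inH + hIn) * inW + wIn) * C + c];
                }
            }
        }
    }

    for (float v : out) { std::printf("%.0f ", v); }    // prints: 1 2 3 4
    std::printf("\n");
    return 0;
}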