author    Teresa Charlin <teresa.charlinreyes@arm.com>    2023-03-14 12:10:28 +0000
committer Teresa Charlin <teresa.charlinreyes@arm.com>    2023-03-28 11:41:55 +0100
commit    ad1b3d7518429e2d16a2695d9b0bbf81b6565ac9 (patch)
tree      a5b8e1ad68a2437f007338f0b6195ca5ed2bddc3 /delegate/classic/src/BatchSpace.hpp
parent    9cb3466b677a1048b8abb24661e92c4c83fdda04 (diff)
IVGCVSW-7555 Restructure Delegate
* New folders created:
  * common is for common code where the TfLite API is not used.
  * classic is for the existing delegate implementation.
  * opaque is for the new opaque delegate implementation.
  * tests is for tests shared between the existing delegate and the opaque delegate, with test utils to select which delegate to use.
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate as libarmnnOpaqueDelegate.so.
* An opaque structure is introduced, but no API is added yet.
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and 2 new CMakeLists.txt files added.
* Renamed BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE.
* Renamed BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE.

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
Diffstat (limited to 'delegate/classic/src/BatchSpace.hpp')
-rw-r--r--    delegate/classic/src/BatchSpace.hpp    216
1 file changed, 216 insertions(+), 0 deletions(-)
diff --git a/delegate/classic/src/BatchSpace.hpp b/delegate/classic/src/BatchSpace.hpp
new file mode 100644
index 0000000000..30c6dbfc15
--- /dev/null
+++ b/delegate/classic/src/BatchSpace.hpp
@@ -0,0 +1,216 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/builtin_ops.h>
+#include <tensorflow/lite/c/builtin_op_data.h>
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/minimal_logging.h>
+
+#include <cstring>
+#include <utility>
+#include <vector>
+
+namespace armnnDelegate
+{
+
+TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
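+    // BATCH_TO_SPACE_ND takes three inputs: the input tensor, the block shape and the crops.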
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteBlockShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteCropsTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLiteCropsTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
+ const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteCropsTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+    std::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
+ std::memcpy(cropsVector.data(), tfLiteCropsTensor.data.data, cropsTensorInfo.GetNumBytes());
+
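+    // Rebuild the (begin, end) crop pairs from the flattened crops tensor.
+    // For example (illustrative values), crops read as [0, 0, 2, 2] become
+    // {{0, 0}, {2, 2}}: one pair per spatial dimension.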
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> crops;
+ for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
+ {
+ crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
+ }
+
+ armnn::BatchToSpaceNdDescriptor descriptor;
+ descriptor.m_BlockShape = blockShape;
+ descriptor.m_Crops = crops;
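+    // The TfLite operator has no layout attribute; 4D TfLite tensors are NHWC by convention.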
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
+ tfLiteContext,
+ IsBatchToSpaceNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+    // If m_Network is a nullptr, the delegate is only checking whether the operator is
+    // supported, so validate without adding a layer.
+    // If it is supported, VisitBatchToSpaceNdOperator will be called again to add the layer
+    // to the network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a BatchToSpace layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
+ TfLiteContext* tfLiteContext,
+ TfLiteNode* tfLiteNode,
+ int nodeIndex,
+ int32_t operatorCode)
+{
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
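+    // SPACE_TO_BATCH_ND takes three inputs: the input tensor, the block shape and the padding list.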
+
+ const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteBlockShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLitePadListTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
+ if (!IsValid(tfLiteContext, tfLitePadListTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+ {
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+ const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBlockShapeTensor);
+ const armnn::TensorInfo& padListTensorInfo = GetTensorInfoForTfLiteTensor(tfLitePadListTensor);
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
+
+ std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+ std::memcpy(blockShape.data(), tfLiteBlockShapeTensor.data.data, blockShapeTensorInfo.GetNumBytes());
+
+ std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+ std::memcpy(padListVector.data(), tfLitePadListTensor.data.data, padListTensorInfo.GetNumBytes());
+
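+    // Rebuild the (before, after) padding pairs from the flattened paddings tensor.
+    // For example (illustrative values), paddings read as [1, 1, 0, 0] become
+    // {{1, 1}, {0, 0}}: one pair per spatial dimension.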
+ size_t step = 2;
+ std::vector<std::pair<unsigned int, unsigned int>> padList;
+ for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+ {
+ padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+ }
+
+ armnn::SpaceToBatchNdDescriptor descriptor;
+ descriptor.m_BlockShape = blockShape;
+ descriptor.m_PadList = padList;
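+    // The TfLite operator has no layout attribute; 4D TfLite tensors are NHWC by convention.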
+ descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+ // Check if supported
+ bool isSupported = false;
+ armnn::BackendId setBackend;
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
+ tfLiteContext,
+ IsSpaceToBatchNdSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo,
+ descriptor);
+ };
+
+    // If m_Network is a nullptr, the delegate is only checking whether the operator is
+    // supported, so validate without adding a layer.
+    // If it is supported, VisitSpaceToBatchNdOperator will be called again to add the layer
+    // to the network, as seen below.
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ // Add a SpaceToBatch layer
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+ armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+ {
+ return kTfLiteError;
+ }
+
+ // Connect
+ return Connect(layer, tfLiteNode, delegateData);
+}
+
+} // namespace armnnDelegate
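For context, a minimal sketch of how these visit functions are typically dispatched from the classic delegate's operator switch. Only the two visit functions and the TfLite builtin codes come from this file and the TfLite headers; the surrounding switch and variable names are illustrative assumptions, not part of this commit:

    // Sketch only: assumes a per-node dispatch function with these variables in scope.
    switch (tfLiteRegistration->builtin_code)
    {
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(delegateData, tfLiteContext, tfLiteNode,
                                               nodeIndex, kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(delegateData, tfLiteContext, tfLiteNode,
                                               nodeIndex, kTfLiteBuiltinSpaceToBatchNd);
        // ... other operators ...
    }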