path: root/delegate/src/Transpose.hpp
Diffstat (limited to 'delegate/src/Transpose.hpp')
-rw-r--r-- delegate/src/Transpose.hpp | 81
1 file changed, 78 insertions, 3 deletions
diff --git a/delegate/src/Transpose.hpp b/delegate/src/Transpose.hpp
index 2d5823da84..c44c0d2773 100644
--- a/delegate/src/Transpose.hpp
+++ b/delegate/src/Transpose.hpp
@@ -9,6 +9,7 @@
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
+#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
namespace armnnDelegate
{
@@ -17,9 +18,83 @@ TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
TfLiteContext* tfLiteContext,
TfLiteNode* tfLiteNode,
int nodeIndex,
- int32_t operatorCode)
+ int32_t tfliteTransposeOperatorCode)
{
- return kTfLiteError;
-}
+ TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+ TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+ const TfLiteTensor *tfLiteTensors = tfLiteContext->tensors;
+ const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
+ if (IsDynamicTensor(tfLiteInputTensor0))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
+ "operator #%d node #%d: ",
+ tfliteTransposeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
+ if (IsDynamicTensor(tfLiteInputTensor1))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
+ "operator #%d node #%d: ",
+ tfliteTransposeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+ if (IsDynamicTensor(tfLiteOutputTensor))
+ {
+ TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
+ "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
+ "operator #%d node #%d: ",
+ tfliteTransposeOperatorCode, nodeIndex);
+ return kTfLiteError;
+ }
+
+ const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+ const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1); // permutation tensor
+ const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+ auto* permTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteInputTensor1);
+ auto numEl = tfLiteInputTensor1.dims->data[0];
+
+ ARMNN_ASSERT(numEl <= armnn::MaxNumOfTensorDimensions);
+ ARMNN_ASSERT(tfLiteInputTensor1.dims->size == 1); // the permutation tensor must have exactly one dimension
+ armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
+ reinterpret_cast<const armnn::PermutationVector::ValueType *> (permTensorDataPtr),
+ (armnn::PermutationVector::SizeType)(numEl)));
+
+ bool isSupported = false;
+
+ auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
+ {
+ FORWARD_LAYER_SUPPORT_FUNC(__func__,
+ tfLiteContext,
+ IsTransposeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ inputTensorInfo0,
+ outputTensorInfo,
+ descriptor);
+ };
+
+ if (!delegateData.m_Network)
+ {
+ validateFunc(outputTensorInfo, isSupported);
+ return isSupported ? kTfLiteOk : kTfLiteError;
+ }
+
+ armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
+ ARMNN_ASSERT(transposeLayer != nullptr);
+ ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // the permutation vector is supplied via the descriptor, not as an input slot
+
+ armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
+ outputSlot.SetTensorInfo(outputTensorInfo);
+
+ return Connect(transposeLayer, tfLiteNode, delegateData);
+}
} // namespace armnnDelegate
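
For reference, the core of this change is converting the TFLite permutation tensor (the second input) into an armnn::PermutationVector that is carried by the TransposeDescriptor. The sketch below is not part of the patch; it is a minimal, self-contained illustration of that same construction, using a hypothetical helper name (MakeTransposeDescriptor) and assuming the usual armnn/Descriptors.hpp and armnn/Types.hpp headers. The permData/numElements parameters stand in for the data pointer and length read from the permutation tensor in the delegate code above.

#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
#include <cstdint>

// Hypothetical helper (not part of the patch), mirroring the descriptor
// construction performed in VisitTransposeOperator.
armnn::TransposeDescriptor MakeTransposeDescriptor(const int32_t* permData, unsigned int numElements)
{
    // PermutationVector copies numElements values out of the raw int32 buffer,
    // exactly as the delegate does with permTensorDataPtr and numEl.
    armnn::PermutationVector permutation(
        reinterpret_cast<const armnn::PermutationVector::ValueType*>(permData),
        static_cast<armnn::PermutationVector::SizeType>(numElements));

    // The permutation lives in the descriptor, which is why the transpose layer
    // built from it only exposes a single input slot.
    return armnn::TransposeDescriptor(permutation);
}

// Example use with a rank-1 permutation tensor of size 4:
//     const int32_t perm[] = { 0, 3, 1, 2 };
//     armnn::TransposeDescriptor desc = MakeTransposeDescriptor(perm, 4);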