//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>

namespace armnnDelegate
{

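// Handles the TfLite TRANSPOSE operator: validates the node, builds an Arm NN
// TransposeDescriptor from the permutation tensor and, when a network is being
// built, adds and connects the corresponding transpose layer.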
TfLiteStatus VisitTransposeOperator(DelegateData& delegateData,
                                    TfLiteContext* tfLiteContext,
                                    TfLiteNode* tfLiteNode,
                                    int nodeIndex,
                                    int32_t tfliteTransposeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
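
    // Reject dynamic tensors: the delegate needs static shapes for both inputs and the output.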
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 tfliteTransposeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (IsDynamicTensor(tfLiteInputTensor1))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 tfliteTransposeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 tfliteTransposeOperatorCode, nodeIndex);
        return kTfLiteError;
    }
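
    // Collect the Arm NN tensor infos for the data input, the permutation input and the output.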
    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1); // permutation tensor
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
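
    // Read the permutation values from the second input and wrap them in a TransposeDescriptor.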
    auto* permTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteInputTensor1);
    unsigned int numEl = tfLiteInputTensor1.dims->data[0];

    ARMNN_ASSERT(numEl <= armnn::MaxNumOfTensorDimensions);
    ARMNN_ASSERT(tfLiteInputTensor1.dims->size == 1); // the permutation tensor must be one-dimensional

    armnn::TransposeDescriptor descriptor(armnn::PermutationVector(
        reinterpret_cast<const armnn::PermutationVector::ValueType*>(permTensorDataPtr),
        static_cast<armnn::PermutationVector::SizeType>(numEl)));
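
    // validateFunc asks every configured backend (via IsTransposeSupported) whether it can run this configuration.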
    bool isSupported = false;

    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsTransposeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo0,
                                   outputTensorInfo,
                                   descriptor);
    };
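
    // No network is being built, so this call only answers the "is this node supported?" query.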
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }
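
    // Add the transpose layer; the permutation travels in the descriptor, so the layer has a single input slot.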
    armnn::IConnectableLayer* transposeLayer = delegateData.m_Network->AddTransposeLayer(descriptor);
    ARMNN_ASSERT(transposeLayer != nullptr);
    ARMNN_ASSERT(transposeLayer->GetNumInputSlots() == 1); // the permutation vector is supplied via the descriptor

    armnn::IOutputSlot& outputSlot = transposeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(transposeLayer, tfLiteNode, delegateData);
}
} // namespace armnnDelegate