aboutsummaryrefslogtreecommitdiff
path: root/delegate/src/Unpack.hpp
blob: 4163163243bfc93038ccfbabcd71b36cc5346cc6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <numeric>

namespace armnnDelegate
{

/// Translate a TfLite UNPACK node into an ArmNN Splitter layer followed by one
/// Reshape layer per output (the Reshape drops the unpacked axis).
///
/// When delegateData.m_Network is null this runs in validation mode: it only
/// queries backend support (for BOTH the Splitter and the Reshape) and returns
/// without building layers.
///
/// @param delegateData  Delegate state: backends, network under construction,
///                      and the slot map used to wire nodes together.
/// @param tfLiteContext TfLite context (tensor table, error reporting).
/// @param tfLiteNode    The UNPACK node being visited.
/// @param nodeIndex     Index of the node, used for diagnostics.
/// @param operatorCode  TfLite builtin operator code, used for diagnostics.
/// @return kTfLiteOk on success / supported; kTfLiteError otherwise.
TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];

    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Get Unpack Axis
    const auto params = reinterpret_cast<TfLiteUnpackParams*>(tfLiteNode->builtin_data);

    const unsigned int unpackAxis = NonNegative(params->axis, nodeIndex);

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);

    if (unpackAxis >= inputTensorInfo.GetNumDimensions())
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: The unpack axis #%d cannot be greater than or equal to "
            "the number of input dimensions #%d in operator #%d node #%d",
            unpackAxis, inputTensorInfo.GetNumDimensions(), operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Get Unpack Num
    unsigned int unpackNum = NonNegative(params->num, nodeIndex);

    // If num is not defined, automatically infer from the length of the dimension axis.
    if(unpackNum == 0)
    {
        unpackNum = inputTensorInfo.GetShape()[unpackAxis];
    }

    // If unpack number cannot be inferred and is still zero, return kTfLiteError.
    if(unpackNum == 0)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Number to unpack must greater than zero in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Check outputs
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, unpackNum, nodeIndex));


    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    std::vector<unsigned int> unpackDimSizes(inputDimSize);

    // Add current input shape to unpackDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (unpackDimSizes[unpackAxis] != unpackNum)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Number to unpack must be the same as length "
            "of the dimension to unpack along in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // After the equality check above this always becomes 1: each Splitter view
    // is one slice of the input along the unpack axis.
    unpackDimSizes[unpackAxis] /= unpackNum;

    armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
    for (unsigned int j = 0; j < unpackNum; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
        {
            splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
        }
        // Each view starts one slice further along the unpack axis.
        splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
    }

    // Collect and validate the TfLite output tensor infos (these describe the
    // final, reshaped outputs with the unpack axis removed).
    std::vector<armnn::TensorInfo> outputs;
    for (unsigned int i = 0; i < unpackNum; ++i)
    {
        const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor));
    }
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    // Determine the shape of the Splitter layer outputs for validation.
    // These still carry the (now size-1) unpack axis.
    armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
                                                          unpackDimSizes.data());

    std::vector<armnn::TensorInfo> splitterOutputs;
    for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
    {
        splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
                                                    outputTensorInfos[outputIndex].get().GetDataType(),
                                                    outputTensorInfos[outputIndex].get().GetQuantizationScale(),
                                                    outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
    }
    std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
                                                                                     splitterOutputs.end());

    // Create Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer.
    // Use this descriptor later when creating every ReshapeLayer as all Reshape Layers should be the same.
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();

    if (!delegateData.m_Network)
    {
        // Validation mode: the node is only delegable if the backend supports
        // BOTH the Splitter and the follow-up Reshape. (Previously the Reshape
        // check lived in a second, unreachable `if (!m_Network)` block, so it
        // never ran and unsupported Reshapes were wrongly reported as ok.)
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsSplitterSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   splitterOutputTensorInfos,
                                   splitDesc);
        if (isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       tfLiteContext,
                                       IsReshapeSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       splitterOutputTensorInfos[0],
                                       outputTensorInfos[0],
                                       reshapeDescriptor);
        }
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    std::string splitterLayerName("Unpack Splitter");

    armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
                                                                                       splitterLayerName.c_str());
    ARMNN_ASSERT(splitterLayer != nullptr);

    for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
    {
        splitterLayer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Connect the input slots
    delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(splitterLayer->GetInputSlot(0));

    // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
    for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
    {
        std::string reshapeLayerName("Unpack Reshape");
        armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
                                                                                         reshapeLayerName.c_str());
        ARMNN_ASSERT(reshapeLayer != nullptr);

        // The splitter output keeps the size-1 unpack axis; the reshape drops it.
        splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
        splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));

        armnn::TensorInfo outputTensorInfo  = outputTensorInfos[outputIndex];
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

        armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);

        // Register each reshape output so downstream nodes can connect to it.
        delegateData.m_OutputSlotForNode[
            static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &slot;

    }

    return kTfLiteOk;
}

} // namespace armnnDelegate