path: root/src/backends/tosaCommon/operatorMappings/Conv2dOperator.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Conv2dOperator.hpp"

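// Converts an ArmNN Convolution2d layer, together with its input, weight, bias and output tensor infos,
// into a TOSA basic block containing a single CONV2D operator.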
TosaSerializationBasicBlock* ConvertConv2dToTosaOperator(const Layer* layer,
                                                         const std::vector<const TensorInfo*>& inputs,
                                                         const std::vector<const TensorInfo*>& outputs,
                                                         const Convolution2dDescriptor* conv2dDescriptor)
{
    std::vector<std::string> inputNames;
    std::string outputName = std::string("output0_");
    std::string blockName  = std::string("Op_CONV2D_block_") + GetUniqueTosaMappingID();

    // Set input names for validation purposes only.
    if(layer == nullptr)
    {
        inputNames.emplace_back("input0_");
        inputNames.emplace_back("input1_");
        if(conv2dDescriptor->m_BiasEnabled)
        {
            inputNames.emplace_back("input2_");
        }
    }
    else
    {
        // If a layer is present then the block will be used for execution, so the input and output names
        // need to be determined from the previous and following layers to keep the graph connected correctly.
        // For validation this doesn't matter.
        for (uint32_t i = 0; i < inputs.size(); ++i)
        {
            // Get the layer connected to the input slot and determine unique layer name.
            Layer& connectedLayer = layer->GetInputSlot(i).GetConnectedOutputSlot()->GetOwningLayer();

            std::string inputName = GenerateUniqueName(connectedLayer, i);
            inputNames.push_back(inputName);
        }

        // Get the layer connected to the output slot and determine unique layer name.
        Layer& connectedLayer = layer->GetOutputSlot().GetConnection(0)->GetOwningLayer();

        outputName = GenerateUniqueName(connectedLayer, 0);
    }

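    // Tensors and operators that will be gathered into the returned basic block.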
    std::vector<TosaSerializationTensor*> tensors;
    std::vector<TosaSerializationOperator*> operators;

    // Set up input tensor
    std::vector<int32_t> inputShape0 = GetTosaTensorShape(inputs[0]->GetShape());
    DType inputDType0 = ArmNNToDType(inputs[0]->GetDataType());

    tensors.push_back(new TosaSerializationTensor(inputNames[0], inputShape0, inputDType0, {}));

    // Only add the weight and bias input tensors if they are not constant, or if running validation.
    // Constant tensors will be created by the ConvertConstantToTosaOperator function.
    if(!inputs[1]->IsConstant() || layer == nullptr)
    {
        std::vector<int32_t> inputShape1 = GetTosaTensorShape(inputs[1]->GetShape());
        DType inputDType1 = ArmNNToDType(inputs[1]->GetDataType());

        tensors.push_back(new TosaSerializationTensor(inputNames[1], inputShape1, inputDType1, {}));
    }

    if(conv2dDescriptor->m_BiasEnabled)
    {
        if(!inputs[2]->IsConstant() || layer == nullptr)
        {
            std::vector<int32_t> inputShape2 = GetTosaTensorShape(inputs[2]->GetShape());
            DType inputDType2 = ArmNNToDType(inputs[2]->GetDataType());

            tensors.push_back(new TosaSerializationTensor(inputNames[2], inputShape2, inputDType2, {}));
        }
    }
    else
    {
        // If bias is disabled, create a constant zero bias, as the TOSA CONV2D operator requires three inputs.
        std::string constantName = std::string("constant_") + GetUniqueTosaMappingID();

        operators.push_back(new TosaSerializationOperator(Op_CONST, Attribute_NONE, nullptr, {}, {constantName}));

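        // Pack the zero bias value into the raw uint8 byte buffer expected by the serialization tensor.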
        std::vector<uint8_t> uint8Data;
        std::vector<float> data = { 0.0 };

        TosaSerializationHandler::ConvertF32toU8(data, uint8Data);

        tensors.push_back(new TosaSerializationTensor(constantName, {1}, DType_FP32, uint8Data));
        inputNames.emplace_back(constantName);
    }

    // Set up output tensor
    std::vector<int32_t> outputShape0 = GetTosaTensorShape(outputs[0]->GetShape());
    DType outputDType0 = ArmNNToDType(outputs[0]->GetDataType());

    tensors.push_back(new TosaSerializationTensor(outputName, outputShape0, outputDType0, {}));

    // Set up CONV2D operator
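    // TOSA expects padding in {top, bottom, left, right} order and stride/dilation in {y, x} order.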
    std::vector<int> pad = {static_cast<int>(conv2dDescriptor->m_PadTop),
                            static_cast<int>(conv2dDescriptor->m_PadBottom),
                            static_cast<int>(conv2dDescriptor->m_PadLeft),
                            static_cast<int>(conv2dDescriptor->m_PadRight)};
    std::vector<int> stride = {static_cast<int>(conv2dDescriptor->m_StrideY),
                               static_cast<int>(conv2dDescriptor->m_StrideX)};
    std::vector<int> dilation = {static_cast<int>(conv2dDescriptor->m_DilationY),
                                 static_cast<int>(conv2dDescriptor->m_DilationX)};
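    // Input and weight zero points are set to 0, so no quantization offsets are applied.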
    TosaConvAttribute attribute(pad, stride, dilation, 0, 0, ArmNNToDType(inputs[0]->GetDataType()));

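    // When bias is disabled, inputNames also includes the zero-bias constant, giving CONV2D its three inputs.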
    auto* op = new TosaSerializationOperator(Op_CONV2D,
                                             Attribute_ConvAttribute,
                                             &attribute,
                                             inputNames,
                                             {outputName});
    operators.push_back(op);

    // operatorInputNames/operatorOutputNames end up being the same as
    // blockInputNames/blockOutputNames for one-to-one ArmNN to TOSA mappings.
    return new TosaSerializationBasicBlock(blockName,     // name
                                           operators,     // operators
                                           tensors,       // tensors
                                           inputNames,    // inputs
                                           {outputName}); // outputs
}