//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "Optimization.hpp"
#include "NetworkUtils.hpp"

namespace armnn
{
namespace optimizations
{

class ConvertConstDequantisationLayersToConstLayersImpl
{
public:
    void Run(Graph& graph, InputSlot& connection) const
    {
        Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
        Layer& child = connection.GetOwningLayer();

        ARMNN_ASSERT(base.GetType() == LayerType::Constant);
        ARMNN_ASSERT(child.GetType() == LayerType::Dequantize);

        ReplaceConstDequantisationLayer(graph,
                                        PolymorphicDowncast<ConstantLayer*>(&base),
                                        PolymorphicDowncast<DequantizeLayer*>(&child));
    }
protected:
    ConvertConstDequantisationLayersToConstLayersImpl() = default;
    ~ConvertConstDequantisationLayersToConstLayersImpl() = default;
private:

    static void ReplaceConstDequantisationLayer(Graph& graph,
                                                ConstantLayer* constantLayer,
                                                DequantizeLayer* dequantizeLayer)
    {
        IgnoreUnused(graph);
        /**
         * This optimisation finds situations where a constant set of inputs feeds a Dequantize
         * layer. In that case we do not want the overhead of dequantizing the values on every
         * inference; instead we dequantize them once and store them in a Constant layer, which
         * can be reused on every inference since the values will not change. For example, a
         * Float16 Constant feeding a Dequantize layer collapses into a single Float32 Constant.
         */
        TensorInfo constantInfo = constantLayer->GetOutputSlot(0).GetTensorInfo();
        TensorInfo inputDequantizeInfo = dequantizeLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
        TensorInfo outputDequantizeInfo = dequantizeLayer->GetOutputSlot(0).GetTensorInfo();

        ARMNN_ASSERT(constantLayer->GetNumOutputSlots() == 1);
        auto numConnections = constantLayer->GetOutputSlot(0).GetNumConnections();

        // Two constant input types are handled: Float16 and QAsymmS8, both dequantized to Float32.
        std::vector<float> newValues(outputDequantizeInfo.GetNumElements());
        if (constantInfo.GetDataType() == DataType::Float16 &&
            inputDequantizeInfo.GetDataType() == DataType::Float16 &&
            outputDequantizeInfo.GetDataType() == DataType::Float32)
        {
            armnnUtils::FloatingPointConverter::ConvertFloat16To32(constantLayer->m_LayerOutput->Map(true),
                                                                   outputDequantizeInfo.GetNumElements(),
                                                                   newValues.data());
        }
        else if (constantInfo.GetDataType() == DataType::QAsymmS8 &&
                 inputDequantizeInfo.GetDataType() == DataType::QAsymmS8 &&
                 outputDequantizeInfo.GetDataType() == DataType::Float32)
        {
            ConvertInt8To32(constantLayer->m_LayerOutput->Map(true),
                            outputDequantizeInfo.GetNumElements(),
                            newValues.data());
        }

        TensorInfo newInfo = outputDequantizeInfo;
        newInfo.SetConstant(true);
        ConstTensor newInput(newInfo, newValues);
        constantLayer->m_LayerOutput.reset(new ScopedTensorHandle(newInput));

        // Moves connections in dequantize output to the constant layer.
        // Dequantize layer will be removed if left unconnected.
        dequantizeLayer->GetOutputSlot().MoveAllConnections(constantLayer->GetOutputSlot());

        // Update the tensor info on the constant layer's output slot
        constantLayer->GetOutputSlot(0).SetTensorInfo(newInfo);
        ARMNN_ASSERT(constantLayer->GetOutputSlot(0).GetTensorInfo().IsConstant() == true);

        // Set isConstant to true in the tensor info of every input slot the constant layer is now connected to
        for (unsigned int i = numConnections; i < constantLayer->GetOutputSlot(0).GetNumConnections(); ++i)
        {
            auto info = constantLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer().GetInputSlot(0)
                    .GetConnectedOutputSlot()->GetTensorInfo();
            info.SetConstant();
            constantLayer->GetOutputSlot(0).GetConnection(i)->GetOwningLayer().GetInputSlot(0)
                    .GetConnectedOutputSlot()->SetTensorInfo(info);
        }
    }

    // Widens each QAsymmS8 (int8) value to float via a plain static cast. Note that the
    // tensor's quantization scale and offset are not applied here, so this conversion
    // matches a true dequantization only when the scale is 1.0 and the offset is 0.
    static void ConvertInt8To32(const void* srcInt8Buffer,
                                size_t numElements,
                                float* dstFloat32Buffer)
    {
        ARMNN_ASSERT(srcInt8Buffer != nullptr);
        ARMNN_ASSERT(dstFloat32Buffer != nullptr);

        const auto* pInt8 = static_cast<const int8_t*>(srcInt8Buffer);

        for (size_t i = 0; i < numElements; ++i)
        {
            dstFloat32Buffer[i] = static_cast<float>(pInt8[i]);
        }
    }

};

using ConvertConstDequantisationLayersToConstLayers
    = OptimizeForConnection<ConstantLayer,
                            DequantizeLayer,
                            ConvertConstDequantisationLayersToConstLayersImpl>;
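
// A minimal usage sketch (an illustration, not part of this header): the alias above is
// applied to a graph through Arm NN's standard optimizer pass, e.g.
//
//     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(
//         armnn::optimizations::ConvertConstDequantisationLayersToConstLayers()));
//
// Optimizer::Pass then invokes ConvertConstDequantisationLayersToConstLayersImpl::Run for
// every Constant -> Dequantize connection it finds in "graph" (an armnn::Graph&, assumed
// to be in scope here).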

} // namespace optimizations
} // namespace armnn