//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <algorithm>
#include <cmath>
#include <utility>

namespace armnn
{

using OffsetScalePair = std::pair<float, int>; // first = quantization scale, second = zero-point offset

struct IQuantizationScheme
{
    virtual OffsetScalePair ComputeScheme(double min, double max) const = 0;

    virtual int NumBits() const = 0;

    virtual DataType GetDataType() const = 0;

    virtual ~IQuantizationScheme() = default;
};

struct QAsymm8QuantizationScheme : IQuantizationScheme
{
    OffsetScalePair ComputeScheme(double min, double max) const override
    {
        if (min > max)
        {
            throw InvalidArgumentException("min > max will result in invalid quantization.");
        }

        double highest = (1 << NumBits()) - 1;

        min = std::min(0.0, min); // min <= 0.0
        max = std::max(0.0, max); // max >= 0.0

        // To avoid dividing by zero when quantizing a zero-filled tensor
        if (min == 0.0 && max == 0.0)
        {
            max = 1.0;
        }

        // Assumes quantization range [0, highest]
        double scale = (max - min) / highest;
        double offset = -min / scale;

        // Clamp offset to [0, highest]
        offset = std::max(0.0, std::min(highest, offset));

        return std::make_pair(static_cast<float>(scale), static_cast<int>(std::round(offset)));
    }

    int NumBits() const override { return 8; }

    DataType GetDataType() const override { return DataType::QAsymmU8; }
};
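
// Example (illustrative values only): for a recorded range of [-1.0, 3.0] the
// asymmetric 8-bit scheme above gives
//
//     QAsymm8QuantizationScheme asymmScheme;
//     OffsetScalePair params = asymmScheme.ComputeScheme(-1.0, 3.0);
//     // scale  = (3.0 - (-1.0)) / 255 ≈ 0.01569f
//     // offset = round(-(-1.0) / 0.01569) = 64, clamped to [0, 255]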

struct QSymmS8QuantizationScheme : IQuantizationScheme
{
    OffsetScalePair ComputeScheme(double min, double max) const override
    {
        if (min > max)
        {
            throw InvalidArgumentException("min > max will result in invalid quantization.");
        }

        // To avoid dividing by zero when quantizing a zero-filled tensor
        if (min == 0.0 && max == 0.0)
        {
            max = 1.0;
        }

        double highest = (1 << (NumBits()-1)) - 1; // (numbits-1) accounts for the sign bit

        double extent = std::max(std::abs(min), std::abs(max));
        double scale = extent / highest;

        return std::make_pair(static_cast<float>(scale), 0);
    }

    int NumBits() const override { return 8; }

    DataType GetDataType() const override { return DataType::QSymmS8; }
};
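
// Example (illustrative values only): for a recorded range of [-0.5, 2.0] the
// symmetric 8-bit scheme above gives
//
//     QSymmS8QuantizationScheme symmScheme;
//     OffsetScalePair params = symmScheme.ComputeScheme(-0.5, 2.0);
//     // scale  = max(|-0.5|, |2.0|) / 127 = 2.0 / 127 ≈ 0.01575f
//     // offset = 0 (the symmetric schemes always use a zero offset)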

struct QSymm16QuantizationScheme : IQuantizationScheme
{
    OffsetScalePair ComputeScheme(double min, double max) const override
    {
        if (min > max)
        {
            throw InvalidArgumentException("min > max will result in invalid quantization.");
        }

        // To avoid dividing by zero when quantizing a zero-filled tensor
        if (min == 0.0 && max == 0.0)
        {
            max = 1.0;
        }

        double highest = (1 << (NumBits()-1)) - 1; // (numbits-1) accounts for the sign bit

        double extent = std::max(std::abs(min), std::abs(max));
        double scale = extent / highest;

        return std::make_pair(static_cast<float>(scale), 0);
    }

    int NumBits() const override { return 16; }

    DataType GetDataType() const override { return DataType::QSymmS16; }
};

} // namespace armnn