path: root/src/armnn/NetworkQuantizerUtils.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "NetworkQuantizationScheme.hpp"

#include <armnn/Tensor.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/ILayerVisitor.hpp>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

#include <boost/assert.hpp>

namespace armnn
{

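// Quantizes a constant buffer to QAsymm8: computes the min/max range of the
// source data, derives the scale and zero-point offset via
// QAsymm8QuantizationScheme, and writes the quantized uint8 values to dst.
// The resulting scale and offset are returned through the out-parameters.
//
// A minimal usage sketch (buffer contents are hypothetical):
//   std::vector<float> weights = {0.1f, -0.2f, 0.3f};
//   std::vector<uint8_t> quantized(weights.size());
//   float scale = 0.0f;
//   int offset = 0;
//   QuantizeConstant(weights.data(), quantized.data(), weights.size(), scale, offset);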
template<typename srcType>
void QuantizeConstant(const srcType* src, uint8_t* dst, size_t numElements, float& scale, int& offset)
{
    BOOST_ASSERT(src);
    BOOST_ASSERT(dst);

    // Track the value range of the source data; the casts keep std::min/std::max
    // well-formed when srcType is not float.
    float min = std::numeric_limits<srcType>::max();
    float max = std::numeric_limits<srcType>::lowest();
    for (size_t i = 0; i < numElements; ++i)
    {
        min = std::min(min, static_cast<float>(src[i]));
        max = std::max(max, static_cast<float>(src[i]));
    }

    QAsymm8QuantizationScheme quantizationScheme;
    OffsetScalePair qParams = quantizationScheme.ComputeScheme(min, max);
    scale = qParams.first;
    offset = qParams.second;

    for (size_t i = 0; i < numElements; ++i)
    {
        dst[i] = armnn::Quantize<uint8_t>(src[i], scale, offset);
    }
}

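// Produces a QAsymm8-quantized copy of the given constant tensor. The
// quantized data is written into the supplied backing vector, which must
// outlive the returned ConstTensor since the tensor references that storage.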
ConstTensor CreateQuantizedConst(const ConstTensor& tensor, std::vector<uint8_t>& backing);

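// Applies an ILayerVisitor to every layer in the container, bracketing the
// traversal with StartVisit()/FinishVisit() calls on the visitor.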
template <typename LayerContainer>
void VisitLayers(const LayerContainer& layerContainer, ILayerVisitor& visitor)
{
    visitor.StartVisit();
    for (auto layer : layerContainer)
    {
        layer->Accept(visitor);
    }
    visitor.FinishVisit();
}

} // namespace armnn