path: root/src/backends/reference/workloads/Decoders.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "BaseIterator.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <boost/assert.hpp>

namespace armnn
{

namespace
{

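// Helper for Signed32 tensors with per-axis quantization. GetPerAxisParams
// returns a pair whose second element holds the per-axis scales and whose
// first element holds the axis parameter; both are forwarded to the
// ScaledInt32PerAxisDecoder (scales first, axis parameter second).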
inline std::unique_ptr<Decoder<float>> MakeSigned32PerAxisDecoder(const TensorInfo& info, const void* data)
{
    auto params = armnnUtils::GetPerAxisParams(info);
    return std::make_unique<ScaledInt32PerAxisDecoder>(
        static_cast<const int32_t*>(data),
        params.second,
        params.first);
}

inline std::unique_ptr<Decoder<float>> MakeSigned32Decoder(const TensorInfo& info, const void* data)
{
    if (info.HasMultipleQuantizationScales())
    {
        // NOTE: If we have multiple quantization scales, we create a ScaledInt32PerAxisDecoder.
        // This will be used to decode per-axis quantized convolution biases.
        return MakeSigned32PerAxisDecoder(info, data);
    }
    else
    {
        if (info.GetQuantizationDim().has_value())
        {
            // NOTE: Even though we only have a single quantization scale, if the quantization
            // dimension is set, the tensor has per-axis quantization and we need to create a
            // ScaledInt32PerAxisDecoder
            return MakeSigned32PerAxisDecoder(info, data);
        }

        const float scale = info.GetQuantizationScale();
        if (scale == 0.f)
        {
            // NOTE: If no quantization scale is set, we create an Int32Decoder, which simply
            // casts the int value to float. This will be used for any INT32 data other than
            // convolution biases.
            return std::make_unique<Int32Decoder>(static_cast<const int32_t*>(data));
        }

        // NOTE: If we only have a single (non-zero) quantization scale and no quantization
        // dimension is specified, we need to create a ScaledInt32Decoder. This will be used
        // to decode per-tensor quantized convolution biases.
        return std::make_unique<ScaledInt32Decoder>(static_cast<const int32_t*>(data), scale);
    }
}
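
// Example (illustrative sketch only; the TensorInfo/TensorShape setters are
// assumed from the public ArmNN API): a bias tensor carrying multiple scales
// takes the per-axis path above, a single non-zero scale yields a
// ScaledInt32Decoder, and a zero scale yields a plain Int32Decoder.
//
//   TensorInfo biasInfo(TensorShape({ 2 }), DataType::Signed32);
//   biasInfo.SetQuantizationScales({ 0.5f, 0.25f }); // multiple scales => per-axis
//
//   const int32_t rawBias[] = { 4, 8 };
//   auto decoder = MakeSigned32Decoder(biasInfo, rawBias); // ScaledInt32PerAxisDecoder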

} // anonymous namespace

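// Creates a Decoder that reads the tensor's underlying data type and yields
// values of type T. Only the T == float specialization is provided below.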
template<typename T>
inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);

template<>
inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
{
    switch (info.GetDataType())
    {
        case DataType::QuantizedSymm8PerAxis:
        {
            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
            return std::make_unique<QSymm8PerAxisDecoder>(
                static_cast<const int8_t*>(data),
                params.second,
                params.first);
        }
        case DataType::QAsymmU8:
        {
            return std::make_unique<QASymm8Decoder>(
                static_cast<const uint8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::QSymmS16:
        {
            return std::make_unique<QSymm16Decoder>(
                static_cast<const int16_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case DataType::Float16:
        {
            return std::make_unique<Float16Decoder>(static_cast<const Half*>(data));
        }
        case DataType::Float32:
        {
            return std::make_unique<Float32Decoder>(static_cast<const float*>(data));
        }
        case DataType::Signed32:
        {
            return MakeSigned32Decoder(info, data);
        }
        case DataType::QSymmS8:
        {
            return std::make_unique<QSymmS8Decoder>(
                static_cast<const int8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
            break;
        }
    }
    return nullptr;
}
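
// Example (illustrative sketch only; the values are hypothetical): decoding a
// QAsymmU8 tensor to float with MakeDecoder. Get() dequantizes the current
// element and operator++ (inherited from BaseIterator) advances the iterator.
//
//   TensorInfo info(TensorShape({ 4 }), DataType::QAsymmU8, 0.1f, 128);
//   const uint8_t quantized[] = { 128, 138, 118, 128 };
//
//   std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(info, quantized);
//   for (unsigned int i = 0; i < info.GetNumElements(); ++i)
//   {
//       float value = decoder->Get(); // e.g. (138 - 128) * 0.1f == 1.0f
//       ++(*decoder);
//   }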

} // namespace armnn