path: root/src/backends/reference/workloads/Encoders.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "BaseIterator.hpp"

#include <armnnUtils/TensorUtils.hpp>

#include <boost/assert.hpp>

namespace armnn
{

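// Factory that creates an Encoder matching the tensor's data type. The returned encoder
// accepts values of type T via Encoder<T>::Set() and writes them into 'data' in the
// tensor's actual (possibly quantized) representation.
//
// Illustrative usage (a minimal sketch; 'outputInfo' and 'outputBuffer' are assumed to be
// provided by the calling workload):
//     std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, outputBuffer);
//     encoder->Set(1.0f); // quantizes/converts as required by outputInfo.GetDataType()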
template<typename T>
inline std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data = nullptr);

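// Specialization for float source values: supports per-axis and per-tensor quantized targets
// as well as Signed32, Float16 and Float32.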
template<>
inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* data)
{
    switch(info.GetDataType())
    {
        case armnn::DataType::QuantizedSymm8PerAxis:
        {
            std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
            return std::make_unique<QSymm8PerAxisEncoder>(
                static_cast<int8_t*>(data),
                params.second,
                params.first);
        }
        case armnn::DataType::QAsymmU8:
        {
            return std::make_unique<QASymm8Encoder>(
                static_cast<uint8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case armnn::DataType::QSymmS8:
        {
            return std::make_unique<QSymmS8Encoder>(
                static_cast<int8_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case armnn::DataType::QSymmS16:
        {
            return std::make_unique<QSymm16Encoder>(
                static_cast<int16_t*>(data),
                info.GetQuantizationScale(),
                info.GetQuantizationOffset());
        }
        case armnn::DataType::Signed32:
        {
            return std::make_unique<Int32Encoder>(static_cast<int32_t*>(data));
        }
        case armnn::DataType::Float16:
        {
            return std::make_unique<Float16Encoder>(static_cast<Half*>(data));
        }
        case armnn::DataType::Float32:
        {
            return std::make_unique<Float32Encoder>(static_cast<float*>(data));
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
            break;
        }
    }
    return nullptr;
}

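// Specialization for boolean source values: only Boolean tensors are supported as the target.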
template<>
inline std::unique_ptr<Encoder<bool>> MakeEncoder(const TensorInfo& info, void* data)
{
    switch(info.GetDataType())
    {
        case armnn::DataType::Boolean:
        {
            return std::make_unique<BooleanEncoder>(static_cast<uint8_t*>(data));
        }
        default:
        {
            BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
            break;
        }
    }
    return nullptr;
}

} //namespace armnn