aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/QuantizationEndToEndTestImpl.hpp
blob: f5c2eea6013abdb5861aee4d8d6317ade59e1aa6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <CommonTestUtils.hpp>

#include <armnn/INetwork.hpp>

#include <ResolveType.hpp>

#include <doctest/doctest.h>

namespace
{

// Builds the minimal graph under test: input -> quantize -> output.
// inputInfo describes the float tensor fed in; outputInfo carries the
// quantized data type plus its scale/offset quantization parameters.
armnn::INetworkPtr CreateQuantizationNetwork(const armnn::TensorInfo& inputInfo,
                                             const armnn::TensorInfo& outputInfo)
{
    using namespace armnn;

    INetworkPtr network(INetwork::Create());

    IConnectableLayer* inputLayer    = network->AddInputLayer(0, "input");
    IConnectableLayer* quantizeLayer = network->AddQuantizeLayer("quantization");
    IConnectableLayer* outputLayer   = network->AddOutputLayer(0, "output");

    // Wire the three layers in sequence; slot 0 -> slot 0 for each hop.
    Connect(inputLayer, quantizeLayer, inputInfo, 0, 0);
    Connect(quantizeLayer, outputLayer, outputInfo, 0, 0);

    return network;
}

// Runs a Quantize layer end to end on the given backends and checks the
// produced values against expectedOutput.
//
// ArmnnIType/ArmnnOType are the armnn input/output data types; Tin/Tout are
// the matching C++ element types resolved via ResolveType.
// scale/offset are the quantization parameters applied to the output tensor.
template<armnn::DataType ArmnnIType, armnn::DataType ArmnnOType,
        typename Tin = armnn::ResolveType<ArmnnIType>, typename Tout = armnn::ResolveType<ArmnnOType>>
void QuantizeEndToEndLayerTestImpl(const std::vector<armnn::BackendId>& backends,
                                   const armnn::TensorShape& tensorShape,
                                   const std::vector<Tin>& input,
                                   const std::vector<Tout>& expectedOutput,
                                   float scale,
                                   int32_t offset)
{
    using namespace armnn;

    // Input and output share a shape; only the output carries quant params.
    TensorInfo inputTensorInfo(tensorShape, ArmnnIType);
    inputTensorInfo.SetConstant(true);

    TensorInfo outputTensorInfo(tensorShape, ArmnnOType, scale, offset);

    // Builds up the structure of the network.
    INetworkPtr network = CreateQuantizationNetwork(inputTensorInfo, outputTensorInfo);
    CHECK(network);

    const std::map<int, std::vector<Tin>> inputData{ { 0, input } };
    const std::map<int, std::vector<Tout>> expectedData{ { 0, expectedOutput } };

    EndToEndLayerTestImpl<ArmnnIType, ArmnnOType>(std::move(network), inputData, expectedData, backends);
}

// End-to-end test: quantize a Float32 input tensor to the quantized type
// ArmnnOType (e.g. QAsymmU8/QAsymmS8/QSymmS16) on the given backends.
// The expected result is computed on the host with the same scale/offset
// via armnnUtils::QuantizedVector, so the backend output must match it.
template<armnn::DataType ArmnnOType, typename Tout = armnn::ResolveType<ArmnnOType>>
void QuantizationEndToEndFloat32(const std::vector<armnn::BackendId>& backends)
{
    using namespace armnn;

    const TensorShape tensorShape({ 1, 1, 1, 5 });

    std::vector<float> inputData = { 63.5f, 49.5f, 14.0f, 0.0f, 50.0f };

    float qScale = 0.5f;
    int32_t qOffset = 127;
    // Reference quantization: round(value / qScale) + qOffset, clamped to Tout's range.
    std::vector<Tout> expectedOutputData = armnnUtils::QuantizedVector<Tout>(inputData, qScale, qOffset);

    QuantizeEndToEndLayerTestImpl<DataType::Float32, ArmnnOType>(backends,
                                                                 tensorShape,
                                                                 inputData,
                                                                 expectedOutputData,
                                                                 qScale,
                                                                 qOffset);
} // Note: stray ';' after the body removed (warned under -Wextra-semi).

// End-to-end test: quantize a Float16 input tensor to the quantized type
// ArmnnOType on the given backends. The network is fed half-precision data,
// while the host-side reference (armnnUtils::QuantizedVector) is computed
// from an equivalent float vector holding the same exactly-representable values.
template<armnn::DataType ArmnnOType, typename Tout = armnn::ResolveType<ArmnnOType>>
void QuantizationEndToEndFloat16(const std::vector<armnn::BackendId>& backends)
{
    using namespace armnn;
    using namespace half_float::literal;
    using Half = half_float::half;

    const TensorShape tensorShape({ 1, 1, 1, 5 });

    // Same values twice: floats for the host-side reference quantization,
    // halves (via the _h literal) for the tensor actually fed to the network.
    std::vector<float> floatInputData = { 63.f, 49.f, 14.f, 0.f, 50.f };
    std::vector<Half> inputData = { 63._h, 49._h, 14._h, 0._h, 50._h };

    float qScale = 0.25f;
    int32_t qOffset = 1;
    std::vector<Tout> expectedOutputData = armnnUtils::QuantizedVector<Tout>(floatInputData, qScale, qOffset);

    QuantizeEndToEndLayerTestImpl<DataType::Float16, ArmnnOType>(backends,
                                                                 tensorShape,
                                                                 inputData,
                                                                 expectedOutputData,
                                                                 qScale,
                                                                 qOffset);
} // Note: stray ';' after the body removed (warned under -Wextra-semi).

}