//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "LayersFwd.hpp"
#include <Network.hpp>
#include <TestUtils.hpp>
#include <doctest/doctest.h>
#include <armnn/backends/TensorHandle.hpp>
#include <Optimizer.hpp>
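
// These tests exercise the ConvertConstDequantisationLayersToConstLayers
// optimization, which folds a Constant layer feeding a Dequantize layer into a
// single Constant layer holding the dequantized Float32 data.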

TEST_SUITE("Optimizer")
{
using namespace armnn;
using namespace armnn::optimizations;

TEST_CASE("ConvertConstFloat16DequantizeToConstFloat32")
{
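    // Build Constant (Float16) -> Dequantize -> Output, then check that the
    // pass replaces the Constant/Dequantize pair with one Float32 Constant.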
    Graph graph;
    const unsigned int shape[] = {1, 2, 2, 3};

    const TensorInfo constTensorInfo(4, shape, DataType::Float16, 1.0f, 0, true);
    const TensorInfo outputDequantizeInfo(4, shape, DataType::Float32, 1.0f, 0, true);

    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
    // Back the tensor with half-precision values so the buffer actually
    // matches the tensor's Float16 data type.
    std::vector<Half> constantValues(constTensorInfo.GetNumElements(), Half(4.5f));
    ConstTensor constTensor(constTensorInfo, constantValues.data());
    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);

    DequantizeLayer* dequantizeLayer = graph.AddLayer<DequantizeLayer>("dequantize");
    dequantizeLayer->GetOutputSlot().SetTensorInfo(outputDequantizeInfo);

    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up constant -> dequantize -> output
    constantLayer->GetOutputSlot().Connect(dequantizeLayer->GetInputSlot(0));
    dequantizeLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Predicates that check both the layer type and its output data type.
    auto checkConstantFloat16 = [](const armnn::Layer* const layer) -> bool {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float16);
    };
    auto checkConstantFloat32 = [](const armnn::Layer* const layer) -> bool {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float32);
    };

    // Before the pass: Constant (Float16) -> Dequantize -> Output.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat16,
                        &IsLayerOfType<DequantizeLayer>,
                        &IsLayerOfType<OutputLayer>));

    // Run the optimization under test.
    armnn::Optimizer::Pass(graph, MakeOptimizations(ConvertConstDequantisationLayersToConstLayers()));

    // After the pass: a single Float32 Constant feeds the Output directly.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat32,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("ConvertConstInt8DequantizeToConstFloat32")
{
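    // Same structure as above, but the Constant holds quantized QAsymmS8 data;
    // the pass should again fold the pair into a single Float32 Constant.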
    Graph graph;
    const unsigned int shape[] = {1, 2, 2, 3};

    const TensorInfo constTensorInfo(4, shape, DataType::QAsymmS8, 1.0f, 0, true);
    const TensorInfo outputDequantizeInfo(4, shape, DataType::Float32, 1.0f, 0, true);

    ConstantLayer* constantLayer = graph.AddLayer<ConstantLayer>("constant");
    std::vector<int8_t> constantValues(constTensorInfo.GetNumElements(), 5);
    ConstTensor constTensor(constTensorInfo, constantValues.data());
    constantLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(constTensor);
    constantLayer->GetOutputSlot().SetTensorInfo(constTensorInfo);

    DequantizeLayer* dequantizeLayer = graph.AddLayer<DequantizeLayer>("dequantize");
    dequantizeLayer->GetOutputSlot().SetTensorInfo(outputDequantizeInfo);

    OutputLayer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up constant -> dequantize -> output
    constantLayer->GetOutputSlot().Connect(dequantizeLayer->GetInputSlot(0));
    dequantizeLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Predicates that check both the layer type and its output data type.
    auto checkConstantQAsymmS8 = [](const armnn::Layer* const layer) -> bool {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::QAsymmS8);
    };
    auto checkConstantFloat32 = [](const armnn::Layer* const layer) -> bool {
        return IsLayerOfType<ConstantLayer>(layer) &&
               (layer->GetDataType() == DataType::Float32);
    };

    // Before the pass: Constant (QAsymmS8) -> Dequantize -> Output.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantQAsymmS8,
                        &IsLayerOfType<DequantizeLayer>,
                        &IsLayerOfType<OutputLayer>));

    // Run the optimization under test.
    armnn::Optimizer::Pass(graph, MakeOptimizations(ConvertConstDequantisationLayersToConstLayers()));

    // After the pass: a single Float32 Constant feeds the Output directly.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        checkConstantFloat32,
                        &IsLayerOfType<OutputLayer>));
}
}