//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
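// Unit tests for the ConvertConstantsFloatToBFloat and ConvertConstantsBFloatToFloat
// optimizations, which rewrite constant (FullyConnected weight) tensors between
// Float32 and BFloat16 and check the expected rounding behaviour.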

#include "../TestUtils.hpp"

#include <BFloat16.hpp>
#include <Optimizer.hpp>

#include <doctest/doctest.h>

using namespace armnn;

TEST_SUITE("Optimizer")
{
using namespace armnn::optimizations;

TEST_CASE("ConvertConstantsFloatToBFloatTest")
{
    armnn::Graph graph;

    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::BFloat16);

    // Create const tensor from fp32 data
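    // Note: BFloat16 keeps the sign, exponent, and top 7 mantissa bits of the
    // Float32 bit pattern; the discarded low 16 bits determine rounding. Worked
    // example: 3.8f = 0x40733333, low half 0x3333 < 0x8000, so it rounds down
    // to 0x4073 == 3.796875f (checked below).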
    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> floatWeights{ 0.0f, -1.0f,
                                     3.8f, // 0x40733333 Round down
                                     3.1055E+29f, // 0x707ADC3C Round up
                                     9.149516E-10f, // 0x307B7FFF Round down
                                    -3.8f, // 0xC0733333 Round down
                                    -3.1055E+29f, // 0xF07ADC3C Round up
                                    -9.149516E-10f // 0xB07B7FFF Round down
                                   };
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);

    // Create simple test network
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(info);

    auto fc      = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    fc->GetOutputSlot().SetTensorInfo(info);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Check tensor data type before conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
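    // (Optimizer::Pass applies every optimization in the supplied list across the
    // graph; here only the Float32 -> BFloat16 constant conversion is supplied.)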

    // Check tensor data type after conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);

    // Check whether the data matches the expected BFloat16 values
    const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
    CHECK(data[0] == BFloat16(0.0f));
    CHECK(data[1] == BFloat16(-1.0f));
    CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
    CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
    CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
    CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
    CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
    CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
}

TEST_CASE("ConvertConstantsBFloatToFloatTest")
{
    armnn::Graph graph;

    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);

    // Create the BFloat16 precision input data
    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> convWeightsData{ 0.f, -1.f,
                                        3.796875f, // 0x4073
                                        3.1072295E29f, // 0x707B
                                        9.131327E-10f, // 0x307B
                                       -3.796875f, // 0xC073
                                       -3.1072295E29f, // 0xF07B
                                       -9.131327E-10f // 0xB07B
                                       };
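    // ConvertFloat32ToBFloat16 stores the raw BFloat16 bit patterns in the
    // uint16_t buffer. The source values are exactly representable in BFloat16,
    // so converting back to Float32 should be lossless.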
    std::vector<uint16_t> bfWeights(8);
    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
                                                                 bfWeights.data());
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16), bfWeights);

    // Create the simple test network
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(info);

    auto fc      = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    fc->GetOutputSlot().SetTensorInfo(info);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Check tensor data type before conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));

    // Check tensor data type after conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);

    // Now check the data matches the expected Float32 values
    const float* data = fc->m_Weight->GetConstTensor<float>();
    CHECK(data[0] == 0.0f);
    CHECK(data[1] == -1.0f);
    CHECK(data[2] == 3.796875f);
    CHECK(data[3] == 3.1072295E29f);
    CHECK(data[4] == 9.131327E-10f);
    CHECK(data[5] == -3.796875f);
    CHECK(data[6] == -3.1072295E29f);
    CHECK(data[7] == -9.131327E-10f);
}

}