ArmNN 20.11
Fp32NetworkToBf16ConverterTests.cpp File Reference
#include "../TestUtils.hpp"
#include <Optimizer.hpp>
#include <boost/test/unit_test.hpp>


Functions

 BOOST_AUTO_TEST_CASE (Fp32NetworkToBf16OptimizationNoConversionTest)
 
 BOOST_AUTO_TEST_CASE (Fp32NetworkToBf16OptimizationConv2DTest)
 
 BOOST_AUTO_TEST_CASE (Fp32NetworkToBf16OptimizationFullyConnectedTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/3]

BOOST_AUTO_TEST_CASE ( Fp32NetworkToBf16OptimizationNoConversionTest  )

Definition at line 15 of file Fp32NetworkToBf16ConverterTests.cpp.

References Graph::AddLayer(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, Layer::GetOutputSlot(), armnn::MakeOptimizations(), Optimizer::Pass(), and OutputSlot::SetTensorInfo().

16 {
17  armnn::Graph graph;
18 
19  const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
20 
21  // Create the simple test network without Conv2D/FullyConnected.
22  auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
23  input->GetOutputSlot().SetTensorInfo(infoFP32);
24 
25  auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
26  floor->GetOutputSlot().SetTensorInfo(infoFP32);
27 
28  auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
29 
30  // Connect up the layers
31  input->GetOutputSlot().Connect(floor->GetInputSlot(0));
32  floor->GetOutputSlot().Connect(output->GetInputSlot(0));
33 
34  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
35  &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
36 
37  // Run the optimizer
38  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
39 
40  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
41  &IsLayerOfType<armnn::FloorLayer>,
42  &IsLayerOfType<armnn::OutputLayer>));
43 }
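The CheckSequence() helper used in these tests applies one layer-type predicate per graph position. A minimal sketch of such a variadic helper, assuming the recursive structure suggested by its usage here (the real implementation lives in ../TestUtils.hpp):

#include <Graph.hpp>
#include <iterator>

// Base case: succeed only when the predicates and the layers run out together.
bool CheckSequence(const armnn::Graph::ConstIterator first,
                   const armnn::Graph::ConstIterator last)
{
    return (first == last);
}

// Recursive case: apply the first predicate to the current layer, then
// match the remaining layers against the remaining predicates.
template <typename U, typename... Us>
bool CheckSequence(const armnn::Graph::ConstIterator first,
                   const armnn::Graph::ConstIterator last,
                   U&& u,
                   Us&&... us)
{
    return u(*first) && CheckSequence(std::next(first), last, us...);
}

With this shape, the BOOST_TEST assertions above fail both when a layer has the wrong type and when the graph has more or fewer layers than predicates.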

◆ BOOST_AUTO_TEST_CASE() [2/3]

BOOST_AUTO_TEST_CASE ( Fp32NetworkToBf16OptimizationConv2DTest  )

Definition at line 45 of file Fp32NetworkToBf16ConverterTests.cpp.

References Graph::AddLayer(), armnn::BFloat16, Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, TensorInfo::GetDataType(), Layer::GetOutputSlot(), Convolution2dLayer::m_Weight, armnn::MakeOptimizations(), Optimizer::Pass(), and OutputSlot::SetTensorInfo().

46 {
47  armnn::Graph graph;
48 
49  const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
50 
51  // Create const tensor fp32 data
52  unsigned int dims[] = { 4, 2, 1, 1 };
53  std::vector<float> floatWeights{ 0.0f, -1.0f,
54  3.8f, // 0x40733333 Round down
55  3.1055E+29f, // 0x707ADC3C Round up
56  9.149516E-10f, // 0x307B7FFF Round down
57  -3.8f, // 0xC0733333 Round down
58  -3.1055E+29f, // 0xF07ADC3C Round up
59  -9.149516E-10f // 0xB07B7FFF Round down
60  };
61  armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
62 
63  // Create const bias fp32 data
64  unsigned int biasDims[] {4};
65  std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
66  armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
67 
68  // A network with Convolution2d layer
69  auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
70  input->GetOutputSlot().SetTensorInfo(infoFP32);
71 
72  armnn::Convolution2dDescriptor descriptor;
73 
74  auto conv = graph.AddLayer<armnn::Convolution2dLayer>(descriptor, "conv2d");
75  conv->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
76  conv->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
77  conv->GetOutputSlot().SetTensorInfo(infoFP32);
78 
79  auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
80 
81  // Connect up the layers
82  input->GetOutputSlot().Connect(conv->GetInputSlot(0));
83  conv->GetOutputSlot().Connect(output->GetInputSlot(0));
84 
85  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
86  &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
87 
88  // Run the optimizer
89  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
90 
91  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
92  &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
93  &IsLayerOfType<armnn::OutputLayer>));
94 
95  armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
96  armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
97  BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
98  BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
99  BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
100  BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
101  BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
102 
103  // Check whether data matches expected Bf16 data
104  armnn::BFloat16* data = conv->m_Weight->GetTensor<armnn::BFloat16>();
105  BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
106  BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
107  BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
108  BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
109  BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
110  BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
111  BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
112  BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
113 }
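The expected bf16 weight values in the checks above follow from rounding the fp32 bit pattern to its top 16 bits with round-to-nearest-even. A self-contained sketch of that conversion (FloatToBf16Bits is a hypothetical helper for illustration, not the armnn::BFloat16 implementation; NaN handling is omitted):

#include <cstdint>
#include <cstring>
#include <cassert>

// Convert an fp32 value to a bf16 bit pattern with round-to-nearest-even.
uint16_t FloatToBf16Bits(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits)); // type-pun via memcpy

    // Round to nearest even: add 0x7FFF plus the lowest kept bit,
    // then truncate to the upper 16 bits.
    uint32_t lsb = (bits >> 16) & 1;
    bits += 0x7FFFu + lsb;
    return static_cast<uint16_t>(bits >> 16);
}

int main()
{
    assert(FloatToBf16Bits(3.8f)        == 0x4073); // 0x40733333 rounds down
    assert(FloatToBf16Bits(3.1055E+29f) == 0x707B); // 0x707ADC3C rounds up
    assert(FloatToBf16Bits(-3.8f)       == 0xC073); // 0xC0733333 rounds down
    return 0;
}

For example, 3.8f (0x40733333) keeps 0x4073 because the discarded bits 0x3333 are below the halfway point, while 3.1055E+29f (0x707ADC3C) rounds up to 0x707B because 0xDC3C is above it.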

◆ BOOST_AUTO_TEST_CASE() [3/3]

BOOST_AUTO_TEST_CASE ( Fp32NetworkToBf16OptimizationFullyConnectedTest  )

Definition at line 115 of file Fp32NetworkToBf16ConverterTests.cpp.

References Graph::AddLayer(), armnn::BFloat16, BOOST_AUTO_TEST_SUITE_END(), Graph::cbegin(), Graph::cend(), CheckSequence(), OutputSlot::Connect(), armnn::Float32, TensorInfo::GetDataType(), Layer::GetOutputSlot(), FullyConnectedLayer::m_Weight, armnn::MakeOptimizations(), Optimizer::Pass(), and OutputSlot::SetTensorInfo().

116 {
117  armnn::Graph graph;
118 
119  const armnn::TensorInfo infoFP32({ 2, 3, 8, 1 }, armnn::DataType::Float32);
120 
121  // Create const tensor fp32 data
122  unsigned int dims[] = { 4, 2, 1, 1 };
123  std::vector<float> floatWeights{ 0.0f, -1.0f,
124  3.8f, // 0x40733333 Round down
125  3.1055E+29f, // 0x707ADC3C Round up
126  9.149516E-10f, // 0x307B7FFF Round down
127  -3.8f, // 0xC0733333 Round down
128  -3.1055E+29f, // 0xF07ADC3C Round up
129  -9.149516E-10f // 0xB07B7FFF Round down
130  };
131  armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
132 
133  // Create const bias fp32 data
134  unsigned int biasDims[] {4};
135  std::vector<float> floatBias{ 1.0f, 2.0f, 3.0f, 4.0f };
136  armnn::ConstTensor bias(armnn::TensorInfo(1, biasDims, armnn::DataType::Float32), floatBias);
137 
138  // A network with FullyConnected layer
139  auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
140  input->GetOutputSlot().SetTensorInfo(infoFP32);
141 
142  armnn::FullyConnectedDescriptor descriptor;
143 
144  auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(descriptor, "fully");
145  fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
146  fc->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(bias);
147  fc->GetOutputSlot().SetTensorInfo(infoFP32);
148 
149  auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
150 
151  // Connect up the layers
152  input->GetOutputSlot().Connect(fc->GetInputSlot(0));
153  fc->GetOutputSlot().Connect(output->GetInputSlot(0));
154 
155  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
156  &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
157 
158  // Run the optimizer
159  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
160 
161  BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
162  &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
163  &IsLayerOfType<armnn::OutputLayer>));
164 
165  armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
166  armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
167  BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
168  BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
169  BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
170  BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
171  BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
172 
173  // Check whether data matches expected Bf16 data
174  armnn::BFloat16* data = fc->m_Weight->GetTensor<armnn::BFloat16>();
175  BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
176  BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
177  BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
178  BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
179  BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
180  BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
181  BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
182  BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
183 }
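Outside these unit tests, the same conversion is reached through the public optimizer entry point rather than by calling Optimizer::Pass directly. A hedged sketch, assuming the 20.11 OptimizerOptions::m_ReduceFp32ToBf16 flag and the armnn::Optimize() signature:

#include <armnn/ArmNN.hpp>

// Sketch: request the fp32-to-bf16 reduction through the public API.
// The backend choice is illustrative only.
armnn::IOptimizedNetworkPtr OptimizeToBf16(const armnn::INetwork& network,
                                           armnn::IRuntime& runtime)
{
    armnn::OptimizerOptions options;
    options.m_ReduceFp32ToBf16 = true; // inserts ConvertFp32ToBf16 layers as in the tests above

    return armnn::Optimize(network,
                           { armnn::Compute::CpuRef },
                           runtime.GetDeviceSpec(),
                           options);
}

As the tests show, the pass converts the inputs and weights of Convolution2d and FullyConnected layers to BFloat16 while leaving biases and layer outputs in Float32.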