From 549b9600a6eaf0727fa084465a75f173edf8f381 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Tue, 24 May 2022 11:32:07 +0100
Subject: Update 22.05 Doxygen Docs after updates to main Readme

Signed-off-by: Nikhil Raj
Change-Id: I56711772406a41ff81fa136a5fb6c59c9b9cf504
---
 22.05/_convert_constants_b_float_tests_8cpp.xhtml | 172 ++++++++++++++++++++++
 1 file changed, 172 insertions(+)
 create mode 100644 22.05/_convert_constants_b_float_tests_8cpp.xhtml

diff --git a/22.05/_convert_constants_b_float_tests_8cpp.xhtml b/22.05/_convert_constants_b_float_tests_8cpp.xhtml
new file mode 100644
index 0000000000..b7831096c7
--- /dev/null
+++ b/22.05/_convert_constants_b_float_tests_8cpp.xhtml
@@ -0,0 +1,172 @@

ArmNN: src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp File Reference
ConvertConstantsBFloatTests.cpp File Reference
#include <TestUtils.hpp>
#include <BFloat16.hpp>
#include <Optimizer.hpp>
#include <doctest/doctest.h>

Go to the source code of this file.
Functions

    TEST_SUITE("Optimizer")
Function Documentation

◆ TEST_SUITE()

TEST_SUITE("Optimizer")
Definition at line 15 of file ConvertConstantsBFloatTests.cpp.


References Graph::AddLayer(), armnn::BFloat16, OutputSlot::Connect(), FloatingPointConverter::ConvertFloat32ToBFloat16(), armnn::Float32, Layer::GetOutputSlot(), armnn::info, FullyConnectedLayer::m_Weight, armnn::MakeOptimizations(), Optimizer::Pass(), and OutputSlot::SetTensorInfo().

TEST_SUITE("Optimizer")
{
using namespace armnn::optimizations;

TEST_CASE("ConvertConstantsFloatToBFloatTest")
{
    armnn::Graph graph;

    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::BFloat16);

    // Create const tensor from fp32 data
    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> floatWeights{ 0.0f, -1.0f,
                                     3.8f,           // 0x40733333 Round down
                                     3.1055E+29f,    // 0x707ADC3C Round up
                                     9.149516E-10f,  // 0x307B7FFF Round down
                                     -3.8f,          // 0xC0733333 Round down
                                     -3.1055E+29f,   // 0xF07ADC3C Round up
                                     -9.149516E-10f  // 0xB07B7FFF Round down
                                   };
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32, 0.0f, 0, true), floatWeights);

    // Create simple test network
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(info);

    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    fc->GetOutputSlot().SetTensorInfo(info);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Check tensor data type before conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));

    // Check tensor data type after conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);

    // Check whether data matches expected Bf16 data
    const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
    CHECK(data[0] == BFloat16(0.0f));
    CHECK(data[1] == BFloat16(-1.0f));
    CHECK(data[2] == BFloat16(3.796875f));      // 0x4073
    CHECK(data[3] == BFloat16(3.1072295E29f));  // 0x707B
    CHECK(data[4] == BFloat16(9.131327E-10f));  // 0x307B
    CHECK(data[5] == BFloat16(-3.796875f));     // 0xC073
    CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
    CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
}

TEST_CASE("ConvertConstantsBFloatToFloatTest")
{
    armnn::Graph graph;

    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);

    // Create the BFloat16 precision input data
    unsigned int dims[] = { 4, 2, 1, 1 };
    std::vector<float> convWeightsData{ 0.f, -1.f,
                                        3.796875f,      // 0x4073
                                        3.1072295E29f,  // 0x707B
                                        9.131327E-10f,  // 0x307B
                                        -3.796875f,     // 0xC073
                                        -3.1072295E29f, // 0xF07B
                                        -9.131327E-10f  // 0xB07B
                                      };
    std::vector<uint16_t> bfWeights(8);
    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(convWeightsData.data(), convWeightsData.size(),
                                                                 bfWeights.data());
    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::BFloat16, 0.0f, 0, true), bfWeights);

    // Create the simple test network
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(info);

    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
    fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
    fc->GetOutputSlot().SetTensorInfo(info);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Check the tensor data type before conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));

    // Check the tensor data type after conversion
    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);

    // Now check the data matches the float32 data
    const float* data = fc->m_Weight->GetConstTensor<float>();
    CHECK(data[0] == 0.0f);
    CHECK(data[1] == -1.0f);
    CHECK(data[2] == 3.796875f);
    CHECK(data[3] == 3.1072295E29f);
    CHECK(data[4] == 9.131327E-10f);
    CHECK(data[5] == -3.796875f);
    CHECK(data[6] == -3.1072295E29f);
    CHECK(data[7] == -9.131327E-10f);
}

}
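The hex comments in the weight data above pair each fp32 bit pattern with the bf16 value the optimization is expected to produce. A bfloat16 is just the top 16 bits of an IEEE-754 float (same sign and exponent, mantissa shortened from 23 to 7 bits), and the discarded low half decides the rounding. The following is a minimal standalone sketch of that round-to-nearest-even truncation, showing only the arithmetic the expected values imply; it is not ArmNN's actual FloatingPointConverter code:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Truncate an IEEE-754 fp32 value to a raw bfloat16 bit pattern using
// round-to-nearest-even. bf16 keeps the sign, the full 8-bit exponent and
// the top 7 mantissa bits of fp32; the discarded low 16 bits decide the
// rounding. (A real converter would special-case NaN before rounding.)
static uint16_t Float32ToBF16Bits(float value)
{
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits)); // well-defined type pun

    const uint32_t lsb = (bits >> 16) & 1u;   // lowest bit that survives
    bits += 0x7FFFu + lsb;                    // ties round to even; may carry up
    return static_cast<uint16_t>(bits >> 16);
}

int main()
{
    // The fp32 inputs from ConvertConstantsFloatToBFloatTest and the bf16
    // patterns its CHECKs expect:
    std::printf("%04X\n", Float32ToBF16Bits(3.8f));          // 4073 (0x40733333 rounds down)
    std::printf("%04X\n", Float32ToBF16Bits(3.1055E+29f));   // 707B (0x707ADC3C rounds up)
    std::printf("%04X\n", Float32ToBF16Bits(9.149516E-10f)); // 307B (0x307B7FFF rounds down)
    return 0;
}

For 3.8f (0x40733333) the kept half is 0x4073 and the remainder 0x3333 is below the halfway point, so the pattern stays 0x4073, which reads back as 3.796875f, matching the test's expected value.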
Referenced declarations

armnn::MakeOptimizations
    Optimizer::Optimizations MakeOptimizations(Args &&... args)
    Definition: Optimizer.hpp:43

Graph::AddLayer
    LayerT * AddLayer(Args &&... args)
    Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
    Definition: Graph.hpp:425

OutputSlot::Connect
    int Connect(InputSlot &destination)
    Definition: Layer.cpp:112

Optimizer::Pass
    static void Pass(Graph &graph, const Optimizations &optimizations)
    Definition: Optimizer.cpp:16

armnn::OutputLayer
    A layer user-provided data can be bound to (e.g. inputs, outputs).
    Definition: OutputLayer.hpp:13

armnn::optimizations::ConvertConstantsFloatToBFloat
    ConvertConstants< Float32ToBFloat16, IsBFloat16Layer > ConvertConstantsFloatToBFloat

armnn::FullyConnectedLayer
    This layer represents a fully connected operation.

FullyConnectedLayer::m_Weight
    std::shared_ptr< ConstTensorHandle > m_Weight
    A unique pointer to store Weight values.

armnn::optimizations::ConvertConstantsBFloatToFloat
    ConvertConstants< BFloat16ToFloat32, IsFloat32Layer > ConvertConstantsBFloatToFloat

armnn::FullyConnectedDescriptor
    A FullyConnectedDescriptor for the FullyConnectedLayer.

armnn::ConstTensor
    A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
    Definition: Tensor.hpp:327

FloatingPointConverter::ConvertFloat32ToBFloat16
    static void ConvertFloat32ToBFloat16(const float *srcFloat32Buffer, size_t numElements, void *dstBFloat16Buffer)

armnn::InputLayer
    A layer user-provided data can be bound to (e.g. inputs, outputs).
    Definition: InputLayer.hpp:13

OutputSlot::SetTensorInfo
    void SetTensorInfo(const TensorInfo &tensorInfo) override
    Definition: Layer.cpp:87

Layer::GetOutputSlot
    const OutputSlot & GetOutputSlot(unsigned int index=0) const override
    Get the const output slot handle by slot index.
    Definition: Layer.hpp:324
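The reverse direction exercised by ConvertConstantsBFloatToFloatTest is lossless, which is why that test can compare the converted weights with exact floating-point equality: widening bf16 to fp32 only appends 16 zero mantissa bits, so every bf16 value is exactly representable as a float. A small illustrative sketch of the widening, again not the ArmNN converter itself:

#include <cassert>
#include <cstdint>
#include <cstring>

// Widen a raw bf16 bit pattern to fp32: place the 16 bits in the high half
// of a 32-bit word and zero-fill the low 16 mantissa bits. No rounding is
// involved, so the mapping is exact.
static float BF16BitsToFloat32(uint16_t bf16)
{
    uint32_t bits = static_cast<uint32_t>(bf16) << 16;
    float result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
}

int main()
{
    // 0x4073 is a bf16 pattern from the test; it widens exactly to 3.796875f.
    assert(BF16BitsToFloat32(0x4073) == 3.796875f);
    assert(BF16BitsToFloat32(0xC073) == -3.796875f);
    return 0;
}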
--
cgit v1.2.1