ArmNN 21.02
ConvertConstantsHalfToFloatTests.cpp File Reference
#include "../TestUtils.hpp"
#include <Optimizer.hpp>
#include <boost/test/unit_test.hpp>


Functions

 BOOST_AUTO_TEST_CASE (ConvertConstantsHalfToFloatTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE()

BOOST_AUTO_TEST_CASE ( ConvertConstantsHalfToFloatTest )

Definition at line 15 of file ConvertConstantsHalfToFloatTests.cpp.

References Graph::AddLayer(), BOOST_AUTO_TEST_SUITE_END(), OutputSlot::Connect(), FloatingPointConverter::ConvertFloat32To16(), armnn::Float16, armnn::Float32, Layer::GetOutputSlot(), FullyConnectedLayer::m_Weight, armnn::MakeOptimizations(), Optimizer::Pass(), and OutputSlot::SetTensorInfo().

16 {
17  armnn::Graph graph;
18 
19  const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
20 
21  // Create the half precision input data
22  unsigned int dims[] = { 4, 1, 1, 1 };
23  std::vector<float> convWeightsData{ 1.f, 2.f, 3.f, 4.f };
24  std::vector<uint16_t> halfWeights(4);
25  armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
26  halfWeights.data());
27  armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16), halfWeights);
28 
29  //Create the simple test network
30  auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
31  input->GetOutputSlot().SetTensorInfo(info);
32 
33  auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
34  fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
35  fc->GetOutputSlot().SetTensorInfo(info);
36 
37  auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
38 
39  //Connect up the layers
40  input->GetOutputSlot().Connect(fc->GetInputSlot(0));
41  fc->GetOutputSlot().Connect(output->GetInputSlot(0));
42 
43  //Test the tensor info is correct.
44  BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
45 
46  // Run the optimizer
47  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
48 
49  //Test the tensor info is correct.
50  BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
51 
52  // Now test the data matches float32 data
53  float* data = fc->m_Weight->GetTensor<float>();
54  BOOST_CHECK(1.0f == data[0]);
55  BOOST_CHECK(2.0f == data[1]);
56  BOOST_CHECK(3.0f == data[2]);
57  BOOST_CHECK(4.0f == data[3]);
58 }
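The listing converts four FP32 weights to FP16 up front, then checks that the optimization converts them back to FP32 in place. Below is a minimal standalone sketch of that round trip; the include path and the ConvertFloat16To32 overload are assumptions based on armnnUtils, not taken from this file.

// Sketch only: the include path and ConvertFloat16To32 are assumptions.
#include <armnnUtils/FloatingPointConverter.hpp>

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    std::vector<float> src{ 1.f, 2.f, 3.f, 4.f };
    std::vector<uint16_t> half(src.size());
    std::vector<float> back(src.size());

    // FP32 -> FP16, as done when building the test's ConstTensor weights.
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(src.data(), src.size(), half.data());

    // FP16 -> FP32, the direction ConvertConstantsHalfToFloat applies to
    // constant weights feeding FP32 layers.
    armnnUtils::FloatingPointConverter::ConvertFloat16To32(half.data(), half.size(), back.data());

    // Small integers are exactly representable in FP16, so the data survives
    // intact, which is why the test can compare against 1.0f..4.0f exactly.
    for (std::size_t i = 0; i < src.size(); ++i)
    {
        assert(src[i] == back[i]);
    }
    return 0;
}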
Referenced entity documentation

Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43

std::unique_ptr<ScopedCpuTensorHandle> FullyConnectedLayer::m_Weight
A unique pointer to store Weight values.

LayerT * Graph::AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402

int OutputSlot::Connect(InputSlot &destination)
Definition: Layer.cpp:83

static void Optimizer::Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16

OutputLayer: a layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13

FullyConnectedLayer: this layer represents a fully connected operation.

FullyConnectedDescriptor: a FullyConnectedDescriptor for the FullyConnectedLayer.

static void FloatingPointConverter::ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16, and stores in the given dstFloat16Buffer.

ConstTensor: a tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314

ConvertConstants<Float16ToFloat32, IsFloat32Layer> ConvertConstantsHalfToFloat

InputLayer: a layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13

void OutputSlot::SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58

const OutputSlot & Layer::GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
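For orientation, the pass is driven by wrapping one or more optimization objects with MakeOptimizations and handing the result to Optimizer::Pass, which applies them to the given Graph. A hedged sketch using only the entities named on this page (the wrapper function name is hypothetical):

#include <Optimizer.hpp>

// Hypothetical helper; the driver calls mirror line 47 of the test above.
void RunConvertConstantsHalfToFloat(armnn::Graph& graph)
{
    using namespace armnn::optimizations;
    // ConvertConstants<Float16ToFloat32, IsFloat32Layer> rewrites FP16 constant
    // weights that feed FP32 layers, as the checks in the test verify.
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
}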