ArmNN 20.08
ConvertConstantsFloatToHalfTests.cpp File Reference
#include "../TestUtils.hpp"
#include <Optimizer.hpp>
#include <Half.hpp>
#include <boost/test/unit_test.hpp>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (ConvertConstantsFloatToHalfTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE()

BOOST_AUTO_TEST_CASE ( ConvertConstantsFloatToHalfTest  )

Definition at line 18 of file ConvertConstantsFloatToHalfTests.cpp.

References Graph::AddLayer(), BOOST_AUTO_TEST_SUITE_END(), OutputSlot::Connect(), armnn::Float16, armnn::Float32, Layer::GetOutputSlot(), armnn::info, FullyConnectedLayer::m_Weight, armnn::MakeOptimizations(), Optimizer::Pass(), and OutputSlot::SetTensorInfo().

19 {
20  armnn::Graph graph;
21 
22  const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float16);
23 
24  // Create const tensor from fp32 data
25  unsigned int dims[] = { 4, 1, 1, 1 };
26  std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
27  armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
28 
29  // Create simple test network
30  auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
31  input->GetOutputSlot().SetTensorInfo(info);
32 
33  auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
34  fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
35  fc->GetOutputSlot().SetTensorInfo(info);
36 
37  auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
38 
39  // Connect up the layers
40  input->GetOutputSlot().Connect(fc->GetInputSlot(0));
41  fc->GetOutputSlot().Connect(output->GetInputSlot(0));
42 
43  // Check tensor data type before conversion
44  BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
45 
46  // Run the optimizer
47  armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
48 
49  // Check tensor data type after conversion
50  BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
51 
52  // Check whether data matches expected fp16 data
53  Half* data = fc->m_Weight->GetTensor<Half>();
54  BOOST_CHECK(data[0] == Half(1.0f));
55  BOOST_CHECK(data[1] == Half(2.0f));
56  BOOST_CHECK(data[2] == Half(3.0f));
57  BOOST_CHECK(data[3] == Half(4.0f));
58 }
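
The four BOOST_CHECKs at the end compare the converted weights for exact equality with Half(1.0f) through Half(4.0f). This works because small integers such as 1.0 to 4.0 are exactly representable in IEEE fp16, so converting these fp32 weights to fp16 loses no precision. The following standalone sketch illustrates that property using only the half_float single-header library behind the Half alias; the include path and main() harness are illustrative and not part of the test:

// Standalone demonstration (not part of the test) that 1.0f..4.0f survive an
// fp32 -> fp16 -> fp32 round trip unchanged, which is what the exact-equality
// checks above rely on.
#include <half.hpp>   // half_float::half; adjust the include path to your installation
#include <cassert>

int main()
{
    const float values[] = { 1.0f, 2.0f, 3.0f, 4.0f };
    for (float v : values)
    {
        half_float::half h(v);               // convert fp32 -> fp16
        assert(static_cast<float>(h) == v);  // round trip is lossless for these values
    }
    return 0;
}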
Referenced symbols:

armnn::MakeOptimizations: Optimizer::Optimizations MakeOptimizations(Args &&... args). Definition: Optimizer.hpp:43.
FullyConnectedLayer::m_Weight: std::unique_ptr<ScopedCpuTensorHandle> m_Weight. A unique pointer to store Weight values.
Graph::AddLayer: LayerT *AddLayer(Args &&... args). Adds a new layer, of type LayerType, to the graph constructed with the arguments passed. Definition: Graph.hpp:403.
OutputSlot::Connect: int Connect(InputSlot &destination). Definition: Layer.cpp:83.
Optimizer::Pass: static void Pass(Graph &graph, const Optimizations &optimizations). Definition: Optimizer.cpp:16.
OutputLayer: A layer user-provided data can be bound to (e.g. inputs, outputs). Definition: OutputLayer.hpp:13.
ConvertConstantsFloatToHalf: ConvertConstants<Float32ToFloat16, IsFloat16Layer> ConvertConstantsFloatToHalf.
FullyConnectedLayer: This layer represents a fully connected operation.
FullyConnectedDescriptor: A FullyConnectedDescriptor for the FullyConnectedLayer.
ConstTensor: A tensor defined by a TensorInfo (shape and data type) and an immutable backing store. Definition: Tensor.hpp:298.
InputLayer: A layer user-provided data can be bound to (e.g. inputs, outputs). Definition: InputLayer.hpp:13.
OutputSlot::SetTensorInfo: void SetTensorInfo(const TensorInfo &tensorInfo) override. Definition: Layer.cpp:58.
Layer::GetOutputSlot: const OutputSlot &GetOutputSlot(unsigned int index=0) const override. Get the const output slot handle by slot index. Definition: Layer.hpp:314.
Half: half_float::half Half. Definition: Half.hpp:16.
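
Taken together, MakeOptimizations() wraps one or more optimization objects and Optimizer::Pass() applies them to a Graph, which is the step the test performs at line 47 of the listing. Below is a minimal sketch of that pattern in isolation; the helper name is illustrative, and the header paths and armnn::optimizations namespace are assumptions based on the ArmNN source layout:

// Sketch only: apply the constant fp32 -> fp16 conversion to an already-built graph.
#include <Optimizer.hpp>
#include <Graph.hpp>
#include "optimizations/ConvertConstants.hpp"   // assumed location of ConvertConstantsFloatToHalf

void ConvertGraphConstantsToFp16(armnn::Graph& graph)
{
    using namespace armnn;
    using namespace armnn::optimizations;

    // MakeOptimizations is variadic, so several optimizations could be wrapped
    // and applied to the graph in this single Optimizer::Pass call.
    Optimizer::Pass(graph, MakeOptimizations(ConvertConstantsFloatToHalf()));
}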