ArmNN
 22.05.01
ConvertConstantsHalfToFloatTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <TestUtils.hpp>
7 
8 #include <Optimizer.hpp>
9 
10 #include <doctest/doctest.h>
11 
12 TEST_SUITE("Optimizer")
13 {
14 using namespace armnn::optimizations;
15 
16 TEST_CASE("ConvertConstantsHalfToFloatTest")
17 {
18  armnn::Graph graph;
19 
20  const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
21 
22  // Create the half precision input data
23  unsigned int dims[] = { 4, 1, 1, 1 };
24  std::vector<float> convWeightsData{ 1.f, 2.f, 3.f, 4.f };
25  std::vector<uint16_t> halfWeights(4);
26  armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
27  halfWeights.data());
28  armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true), halfWeights);
29 
30  //Create the simple test network
31  auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
32  input->GetOutputSlot().SetTensorInfo(info);
33 
35  fc->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
36  fc->GetOutputSlot().SetTensorInfo(info);
37 
38  auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
39 
40  //Connect up the layers
41  input->GetOutputSlot().Connect(fc->GetInputSlot(0));
42  fc->GetOutputSlot().Connect(output->GetInputSlot(0));
43 
44  //Test the tensor info is correct.
45  CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
46 
47  // Run the optimizer
49 
50  //Test the tensor info is correct.
51  CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
52 
53  // Now test the data matches float32 data
54  const float* data = fc->m_Weight->GetConstTensor<float>();
55  CHECK(1.0f == data[0]);
56  CHECK(2.0f == data[1]);
57  CHECK(3.0f == data[2]);
58  CHECK(4.0f == data[3]);
59 }
60 
61 }
Optimizer::Optimizations MakeOptimizations(Args &&... args)
Definition: Optimizer.hpp:43
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:425
int Connect(InputSlot &destination)
Definition: Layer.cpp:112
static void Pass(Graph &graph, const Optimizations &optimizations)
Definition: Optimizer.cpp:16
A layer to which user-provided data can be bound (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
TEST_SUITE("Optimizer")
This layer represents a fully connected operation.
std::shared_ptr< ConstTensorHandle > m_Weight
A unique pointer to store Weight values.
A FullyConnectedDescriptor for the FullyConnectedLayer.
static void ConvertFloat32To16(const float *srcFloat32Buffer, size_t numElements, void *dstFloat16Buffer)
Converts a buffer of FP32 values to FP16, and stores in the given dstFloat16Buffer.
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
ConvertConstants< Float16ToFloat32, IsFloat32Layer > ConvertConstantsHalfToFloat
A layer to which user-provided data can be bound (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:87
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:324