ArmNN
 21.02
Fp16SupportTest.cpp File Reference
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
#include <Half.hpp>
#include <Graph.hpp>
#include <Optimizer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
#include <set>

Go to the source code of this file.

Functions

 BOOST_AUTO_TEST_CASE (Fp16DataTypeSupport)
 
 BOOST_AUTO_TEST_CASE (Fp16AdditionTest)
 

Function Documentation

◆ BOOST_AUTO_TEST_CASE() [1/2]

BOOST_AUTO_TEST_CASE ( Fp16DataTypeSupport  )

Definition at line 24 of file Fp16SupportTest.cpp.

References Graph::AddLayer(), OutputSlot::Connect(), armnn::Float16, TensorInfo::GetDataType(), Layer::GetOutputSlot(), OutputSlot::GetTensorInfo(), and OutputSlot::SetTensorInfo().

25 { // Verifies a Graph can carry Float16 tensor infos end-to-end: two inputs -> addition -> output.
26  Graph graph;
27 
28  Layer* const inputLayer1 = graph.AddLayer<InputLayer>(1, "input1"); // binding ids 1 and 2 for the inputs
29  Layer* const inputLayer2 = graph.AddLayer<InputLayer>(2, "input2");
30 
31  Layer* const additionLayer = graph.AddLayer<AdditionLayer>("addition");
32  Layer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output"); // output bound to id 0
33 
34  TensorInfo fp16TensorInfo({1, 2, 3, 5}, armnn::DataType::Float16); // one FP16 info shared by all slots below
35  inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
36  inputLayer2->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
37  additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
38 
39  inputLayer1->GetOutputSlot().SetTensorInfo(fp16TensorInfo); // GetOutputSlot() defaults to slot index 0
40  inputLayer2->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
41  additionLayer->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
42 
43  BOOST_CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16); // each slot must report Float16 back unchanged
44  BOOST_CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
45  BOOST_CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
46 }
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
int Connect(InputSlot &destination)
Definition: Layer.cpp:83
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
DataType GetDataType() const
Definition: Tensor.hpp:194
This layer represents an addition operation.
A layer user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
void SetTensorInfo(const TensorInfo &tensorInfo) override
Definition: Layer.cpp:58
const OutputSlot & GetOutputSlot(unsigned int index=0) const override
Get the const output slot handle by slot index.
Definition: Layer.hpp:318
const TensorInfo & GetTensorInfo() const override
Definition: Layer.cpp:63

◆ BOOST_AUTO_TEST_CASE() [2/2]

BOOST_AUTO_TEST_CASE ( Fp16AdditionTest  )

Definition at line 48 of file Fp16SupportTest.cpp.

References BOOST_AUTO_TEST_SUITE_END(), IOutputSlot::Connect(), IRuntime::Create(), INetwork::Create(), armnn::Float16, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetOutputSlot(), armnn::GpuAcc, armnn::Optimize(), and IOutputSlot::SetTensorInfo().

49 { // End-to-end FP16 addition on the GpuAcc backend: build, optimize, load, run, check results.
50  using namespace half_float::literal; // enables the _h half-precision literal suffix used below
51  // Create runtime in which test will run
52  IRuntime::CreationOptions options; // restored: this line was dropped in extraction (numbering jumped 51 -> 53) and 'options' is used on the next line
53  IRuntimePtr runtime(IRuntime::Create(options));
54 
55  // Builds up the structure of the network.
56  INetworkPtr net(INetwork::Create());
57 
58  IConnectableLayer* inputLayer1 = net->AddInputLayer(0); // binding ids 0 and 1
59  IConnectableLayer* inputLayer2 = net->AddInputLayer(1);
60  IConnectableLayer* additionLayer = net->AddAdditionLayer();
61  IConnectableLayer* outputLayer = net->AddOutputLayer(0);
62 
63  inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
64  inputLayer2->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
65  additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
66 
67  //change to float16
68  TensorInfo fp16TensorInfo(TensorShape({4}), DataType::Float16); // 4-element FP16 vectors
69  inputLayer1->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
70  inputLayer2->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
71  additionLayer->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
72 
73  // optimize the network
74  std::vector<BackendId> backends = {Compute::GpuAcc};
75  IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
76 
77  // Loads it into the runtime.
78  NetworkId netId;
79  runtime->LoadNetwork(netId, std::move(optNet));
80 
81  std::vector<Half> input1Data
82  {
83  1.0_h, 2.0_h, 3.0_h, 4.0_h
84  };
85 
86  std::vector<Half> input2Data
87  {
88  100.0_h, 200.0_h, 300.0_h, 400.0_h
89  };
90 
91  InputTensors inputTensors
92  {
93  {0,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
94  {1,ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())} // NOTE(review): reuses binding 0's info for binding id 1 — benign here since both infos are identical, but confirm the index against the original source
95  };
96 
97  std::vector<Half> outputData(input1Data.size());
98  OutputTensors outputTensors
99  {
100  {0,Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
101  };
102 
103  // Does the inference.
104  runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
105 
106  // Checks the results.
107  BOOST_TEST(outputData == std::vector<Half>({ 101.0_h, 202.0_h, 303.0_h, 404.0_h})); // Add
108 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:26
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:340
int NetworkId
Definition: IRuntime.hpp:20
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:306
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1502
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:341
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
virtual int Connect(IInputSlot &destination)=0