author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2018-10-12 13:00:55 +0100
---|---|---
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-22 16:57:53 +0100
commit | 3b278e9261bd0de67c82f7d6c36731f118124f52 (patch) |
tree | 3750ee01827809141752302e94d4d25a21f88492 | /src/backends/cl/test/Fp16SupportTest.cpp
parent | d3360cd490eafc76ceddb6760054bd80444179c1 (diff) |
download | armnn-3b278e9261bd0de67c82f7d6c36731f118124f52.tar.gz |
IVGCVSW-1938: Move backend-specific source files to the corresponding backend
Change-Id: I558a9a007604afc55e536d877f8da7d0215cc9c3
Diffstat (limited to 'src/backends/cl/test/Fp16SupportTest.cpp')
-rw-r--r-- | src/backends/cl/test/Fp16SupportTest.cpp | 112
1 file changed, 112 insertions, 0 deletions
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
new file mode 100644
index 0000000000..90bef3647b
--- /dev/null
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Descriptors.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnnUtils/Half.hpp>
+
+#include <Graph.hpp>
+#include <Optimizer.hpp>
+#include <backends/CpuTensorHandle.hpp>
+#include <backends/test/QuantizeHelper.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <set>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Fp16Support)
+
+BOOST_AUTO_TEST_CASE(Fp16DataTypeSupport)
+{
+    Graph graph;
+
+    Layer* const inputLayer1 = graph.AddLayer<InputLayer>(1, "input1");
+    Layer* const inputLayer2 = graph.AddLayer<InputLayer>(2, "input2");
+
+    Layer* const additionLayer = graph.AddLayer<AdditionLayer>("addition");
+    Layer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    TensorInfo fp16TensorInfo({1, 2, 3, 5}, armnn::DataType::Float16);
+    inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
+    inputLayer2->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
+    additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer1->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
+    inputLayer2->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
+    additionLayer->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
+
+    BOOST_CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    BOOST_CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    BOOST_CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+}
+
+BOOST_AUTO_TEST_CASE(Fp16AdditionTest)
+{
+    using namespace half_float::literal;
+    // Creates the runtime in which the test will run.
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* inputLayer1 = net->AddInputLayer(0);
+    IConnectableLayer* inputLayer2 = net->AddInputLayer(1);
+    IConnectableLayer* additionLayer = net->AddAdditionLayer();
+    IConnectableLayer* outputLayer = net->AddOutputLayer(0);
+
+    inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
+    inputLayer2->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
+    additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    // Sets all tensor infos to Float16.
+    TensorInfo fp16TensorInfo(TensorShape({4}), DataType::Float16);
+    inputLayer1->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
+    inputLayer2->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
+    additionLayer->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
+
+    // Optimizes the network for the GpuAcc backend.
+    std::vector<Compute> backends = {Compute::GpuAcc};
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    std::vector<Half> input1Data
+    {
+        1.0_h, 2.0_h, 3.0_h, 4.0_h
+    };
+
+    std::vector<Half> input2Data
+    {
+        100.0_h, 200.0_h, 300.0_h, 400.0_h
+    };
+
+    InputTensors inputTensors
+    {
+        {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
+        {1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
+    };
+
+    std::vector<Half> outputData(input1Data.size());
+    OutputTensors outputTensors
+    {
+        {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+    };
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results: element-wise addition of input1Data and input2Data.
+    BOOST_TEST(outputData == std::vector<Half>({101.0_h, 202.0_h, 303.0_h, 404.0_h}));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
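The test feeds hard-coded FP16 literals from `half_float::literal`. For real input data one usually starts from FP32 values, so a conversion step is needed before binding the buffers. A minimal sketch, assuming only the `armnn::Half` alias that `armnnUtils/Half.hpp` provides; the `ToFp16` helper is hypothetical and not part of this patch:

```cpp
#include <armnnUtils/Half.hpp>

#include <algorithm>
#include <vector>

// Hypothetical helper (not in the patch): converts FP32 host data to FP16
// so it can be bound to the inputs of a Float16 network like the one above.
std::vector<armnn::Half> ToFp16(const std::vector<float>& input)
{
    std::vector<armnn::Half> output(input.size());
    std::transform(input.begin(), input.end(), output.begin(),
                   [](float value) { return armnn::Half(value); });
    return output;
}
```

When built into ArmNN's UnitTests binary, this suite can be run on its own with Boost.Test's standard filter option, e.g. `--run_test=Fp16Support`.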
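Authoring the network directly in FP16, as the test does, is one option; ArmNN can alternatively take an FP32 network and reduce it to FP16 during optimization. A sketch of that route, assuming the `OptimizerOptions` overload of `Optimize` with the `m_ReduceFp32ToFp16` flag is available in the release being used:

```cpp
// Sketch only: relies on armnn::OptimizerOptions and its m_ReduceFp32ToFp16
// flag; availability and defaults may vary between Arm NN releases.
armnn::OptimizerOptions optimizerOptions;
optimizerOptions.m_ReduceFp32ToFp16 = true; // request Fp32->Fp16 reduction

std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
armnn::IOptimizedNetworkPtr optNet =
    armnn::Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
```

This keeps the network definition in FP32 while the GpuAcc backend executes in FP16 where supported.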