about summary refs log tree commit diff
path: root/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2018-11-01 16:15:57 +0000
committerAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2018-11-02 14:49:21 +0000
commitc9cc80455ff29fd2c8622c9487ec9c57ade6ea30 (patch)
tree41b1491312fe6082b39d5d37ffa0dcf0ab0f2817 /src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp
parent207ef9a6b8b3ea0afe9a095639f67b5dedd095d7 (diff)
downloadarmnn-c9cc80455ff29fd2c8622c9487ec9c57ade6ea30.tar.gz
IVGCVSW-1946: Remove armnn/src from the include paths
Change-Id: I663a0a0fccb43ee960ec070121a59df9db0bb04e
Diffstat (limited to 'src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp')
-rw-r--r--src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp
new file mode 100644
index 0000000000..a63f0cbd1d
--- /dev/null
+++ b/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <Half.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+/// End-to-end test of a ConvertFp16ToFp32 workload created through the given
+/// backend workload factory: uploads FP16 input data, executes the workload,
+/// and copies the FP32 result back for comparison against the expected values.
+/// @param workloadFactory Backend factory used to create the tensor handles
+///        and the ConvertFp16ToFp32 workload under test.
+/// @return LayerTestResult carrying both the actual output (ret.output) and
+///         the reference values (ret.outputExpected); the caller performs the
+///         comparison.
+LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Enables the _h suffix for half_float::half literals below.
+    using namespace half_float::literal;
+
+    // Input and output share the same 1x3x2x3 shape; only the data type differs.
+    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+
+    // FP16 input values, chosen symmetric around zero and all exactly
+    // representable in half precision so the conversion is lossless.
+    auto input = MakeTensor<armnn::Half, 4>(inputTensorInfo,
+        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+          1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+
+    // Expected output: the same values as FP32 (element-for-element match
+    // with the FP16 input above).
+    LayerTestResult<float, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    // Wire the tensor handles into the workload descriptor before creation.
+    armnn::ConvertFp16ToFp32QueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp16ToFp32(data, info);
+
+    // Allocate backing memory only after the workload has been created.
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    // Read the computed FP32 result back into ret.output for the caller to check.
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}