From de36e4a9c299028e792c3a5bd99ad0816d806077 Mon Sep 17 00:00:00 2001
From: Ryan OShea
Date: Fri, 13 Mar 2020 16:26:19 +0000
Subject: IVGCVSW-3726 Upload ArmNN Doxygen files

* Upload current ArmNN Doxygen files

Signed-off-by: Ryan OShea
Change-Id: I8989ed16ee40a99a4495b100bd009cf3e24a7285
---
 ...convert_fp32_to_fp16_test_impl_8cpp_source.html | 116 +++++++++++++++++++++
 1 file changed, 116 insertions(+)
 create mode 100644 Documentation/_convert_fp32_to_fp16_test_impl_8cpp_source.html

(limited to 'Documentation/_convert_fp32_to_fp16_test_impl_8cpp_source.html')

diff --git a/Documentation/_convert_fp32_to_fp16_test_impl_8cpp_source.html b/Documentation/_convert_fp32_to_fp16_test_impl_8cpp_source.html
new file mode 100644
index 0000000000..6a6c003125
--- /dev/null
+++ b/Documentation/_convert_fp32_to_fp16_test_impl_8cpp_source.html
@@ -0,0 +1,116 @@

ArmNN: src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp Source File
ConvertFp32ToFp16TestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

// The hyperlinked include lines were stripped by the HTML capture; the
// headers below are reconstructed from the helpers the test uses.
#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

// Signature restored from the Doxygen member documentation below.
LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    boost::ignore_unused(memoryManager);
    using namespace half_float::literal;

    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);

    auto input = MakeTensor<float, 4>(inputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
           1.0f,   0.4f,   0.5f,   1.3f,  1.5f,  2.0f,  8.76f, 15.2f, 37.5f });

    LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
           1.0_h,   0.4_h,   0.5_h,   1.3_h,  1.5_h,  2.0_h,  8.76_h, 15.2_h, 37.5_h });

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // These two declarations were dropped by the capture; the types are
    // confirmed by the CreateConvertFp32ToFp16 signature documented below.
    armnn::ConvertFp32ToFp16QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
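The expected output is built with the half_float _h literals rather than float values: a number such as 8.76 is not exactly representable in IEEE fp16, and both the ConvertFp32ToFp16 workload and the _h literal round to the same nearest half, so the element-wise comparison can be exact. A minimal standalone sketch of that rounding behaviour, assuming the public half_float library header (half.hpp) that ArmNN wraps as armnn::Half:

#include <cstdio>
#include <half.hpp>  // half_float library (assumed include path)

int main()
{
    using namespace half_float::literal;

    // fp32 -> fp16 conversion rounds to the nearest representable half.
    half_float::half h(8.76f);
    std::printf("%.6f\n", static_cast<float>(h));  // prints 8.757812, the nearest fp16 to 8.76

    // The _h literal performs the same rounding, so expected values match exactly.
    std::printf("%s\n", (h == 8.76_h) ? "equal" : "different");  // prints "equal"
    return 0;
}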
Referenced declarations (from the Doxygen member documentation):

void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< armnn::Half, 4 > SimpleConvertFp32ToFp16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor &descriptor, const WorkloadInfo &info) const
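For context, a hedged sketch of how a layer test like this is typically driven: a concrete backend's workload factory is constructed and passed in, and the returned actual/expected tensors are compared. RefWorkloadFactory, CompareTensors, and the Boost.Test wiring shown here are assumptions based on the helpers above, not the exact registration mechanism ArmNN's suites use:

#include <boost/test/unit_test.hpp>
#include <reference/RefWorkloadFactory.hpp>  // assumed header for the CPU reference backend

BOOST_AUTO_TEST_CASE(SimpleConvertFp32ToFp16EndToEnd)  // hypothetical test name
{
    armnn::RefWorkloadFactory workloadFactory;  // reference (CPU) backend

    // The memory manager is unused by this test (boost::ignore_unused above),
    // so a null shared pointer suffices here.
    LayerTestResult<armnn::Half, 4> result =
        SimpleConvertFp32ToFp16Test(workloadFactory, nullptr);

    // Element-wise comparison of actual vs. expected fp16 tensors.
    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
}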