From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 ...onvert_fp32_to_fp16_test_impl_8cpp_source.xhtml | 131 +++++++++++++++++++++
 1 file changed, 131 insertions(+)
 create mode 100644 20.02/_convert_fp32_to_fp16_test_impl_8cpp_source.xhtml

diff --git a/20.02/_convert_fp32_to_fp16_test_impl_8cpp_source.xhtml b/20.02/_convert_fp32_to_fp16_test_impl_8cpp_source.xhtml
new file mode 100644
index 0000000000..939f382617
--- /dev/null
+++ b/20.02/_convert_fp32_to_fp16_test_impl_8cpp_source.xhtml
@@ -0,0 +1,131 @@

ArmNN 20.02: src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp Source File
ConvertFp32ToFp16TestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertFp32ToFp16TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);
    using namespace half_float::literal;

    // Same shape on both sides: only the element type changes.
    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);

    auto input = MakeTensor<float, 4>(inputTensorInfo,
        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
           1.0f,   0.4f,   0.5f,   1.3f,  1.5f,  2.0f,  8.76f, 15.2f, 37.5f });

    // Expected values are written directly as fp16 constants via the _h literal.
    LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
           1.0_h,   0.4_h,   0.5_h,   1.3_h,  1.5_h,  2.0_h,  8.76_h, 15.2_h, 37.5_h });

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload's inputs and outputs, then ask the factory for a
    // backend-specific ConvertFp32ToFp16 workload.
    armnn::ConvertFp32ToFp16QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
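The helper follows ArmNN's standard layer-test pattern: describe the tensors, create handles from the workload factory, populate a queue descriptor, create and execute the workload, then copy the result back for comparison. A minimal sketch of how a backend test might drive this helper, assuming the reference backend's RefWorkloadFactory and the Boost.Test-based CompareTensors helper from test/TensorHelpers.hpp (the include path, driver function, and harness wiring are assumptions, not taken from this file):

#include <reference/RefWorkloadFactory.hpp> // assumed include path
#include <test/TensorHelpers.hpp>

void CheckSimpleConvertFp32ToFp16() // hypothetical driver function
{
    armnn::RefWorkloadFactory workloadFactory;

    // Run the test helper; the reference backend needs no memory manager here.
    auto result = SimpleConvertFp32ToFp16Test(workloadFactory, nullptr);

    // Compare the actual fp16 output against the expected fp16 tensor,
    // element by element.
    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
}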
Referenced declarations (from the linked Doxygen pages):

void IgnoreUnused(Ts&&...)
std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const = 0
virtual std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor, const WorkloadInfo& info) const
WorkloadInfo: contains information about inputs and outputs to a layer.
LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
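The _h suffix used for the expected values comes from the standalone half_float library that ArmNN uses for armnn::Half. A self-contained sketch of the rounding behaviour the test exercises, assuming that library is on the include path (the header name may differ per installation):

#include <cassert>
#include <cmath>
#include <half.hpp> // assumption: the "half" half-precision library used by ArmNN

int main()
{
    using namespace half_float::literal;

    // 37.5 is exactly representable in fp16, so the conversion is lossless.
    assert(half_float::half(37.5f) == 37.5_h);

    // 0.4 is not exactly representable in fp16; fp32 -> fp16 rounds to the
    // nearest representable value (about 0.39990234). The test therefore
    // compares against 0.4_h, the same rounded constant, not the fp32 literal.
    float roundTripped = static_cast<float>(half_float::half(0.4f));
    assert(half_float::half(0.4f) == 0.4_h);
    assert(std::fabs(roundTripped - 0.4f) < 1e-3f);

    return 0;
}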
-- cgit v1.2.1