From fd627ffaec8fd8801d980b4c91ee7c0607ab6aaf Mon Sep 17 00:00:00 2001
From: Jan Eilers
Date: Thu, 25 Feb 2021 17:44:00 +0000
Subject: IVGCVSW-5687 Update Doxygen Docu

* Update Doxygen Documentation for 21.02 release

Signed-off-by: Jan Eilers
Change-Id: I9ed2f9caab038836ea99d7b378d7899fe431a4e5
---
 ...convert_fp32_to_bf16_workload_8cpp_source.xhtml | 129 +++++++++++++++++++++
 1 file changed, 129 insertions(+)
 create mode 100644 21.02/_ref_convert_fp32_to_bf16_workload_8cpp_source.xhtml

(limited to '21.02/_ref_convert_fp32_to_bf16_workload_8cpp_source.xhtml')

diff --git a/21.02/_ref_convert_fp32_to_bf16_workload_8cpp_source.xhtml b/21.02/_ref_convert_fp32_to_bf16_workload_8cpp_source.xhtml
new file mode 100644
index 0000000000..b34d701aff
--- /dev/null
+++ b/21.02/_ref_convert_fp32_to_bf16_workload_8cpp_source.xhtml
@@ -0,0 +1,129 @@

ArmNN: src/backends/reference/workloads/RefConvertFp32ToBf16Workload.cpp Source File
RefConvertFp32ToBf16Workload.cpp
//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefConvertFp32ToBf16Workload.hpp"
#include "RefWorkloadUtils.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>

#include <BFloat16.hpp>

namespace armnn
{

void RefConvertFp32ToBf16Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToBf16Workload_Execute");

    // Raw fp32 input buffer and bf16 output buffer for this workload.
    const float* const input = GetInputTensorDataFloat(0, m_Data);
    BFloat16* const output = GetOutputTensorDataBFloat16(0, m_Data);

    // Convert every element of the input tensor from float32 to bfloat16.
    unsigned int numElements = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output);
}

} //namespace armnn
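For readers unfamiliar with the format: bfloat16 keeps float32's sign bit and 8-bit exponent and truncates the 23-bit mantissa to 7 bits, so converting a value amounts to keeping the top 16 bits of its float32 bit pattern. The sketch below illustrates that idea with round-to-nearest-even on the discarded bits; the helper names (Fp32ToBf16, ConvertBuffer) are hypothetical, and this is only a minimal standalone illustration, not ArmNN's actual armnnUtils::FloatingPointConverter implementation.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Convert one IEEE-754 float32 to a bfloat16 bit pattern by keeping the top
// 16 bits, rounding the discarded lower 16 bits to nearest even.
// Caveat: this simple bias trick can corrupt NaN payloads; production
// converters special-case NaN.
std::uint16_t Fp32ToBf16(float value)
{
    std::uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits)); // bit-cast without undefined behaviour

    // Round to nearest even: bias by 0x7FFF plus the LSB of the kept half.
    bits += 0x7FFFu + ((bits >> 16) & 1u);

    return static_cast<std::uint16_t>(bits >> 16);
}

// Buffer-level loop shaped like ConvertFloat32ToBFloat16(src, numElements, dst).
void ConvertBuffer(const float* src, std::size_t numElements, std::uint16_t* dst)
{
    for (std::size_t i = 0; i < numElements; ++i)
    {
        dst[i] = Fp32ToBf16(src[i]);
    }
}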
Referenced symbols (Doxygen hover tooltips for the listing above):

const float* GetInputTensorDataFloat(unsigned int idx, const PayloadType& data)
CPU Execution: Reference C++ kernels.
const QueueDescriptor m_Data (Definition: Workload.hpp:46)
Copyright (c) 2021 ARM Limited and Contributors.
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name) (Definition: Profiling.hpp:173)
static void ConvertFloat32ToBFloat16(const float* srcFloat32Buffer, size_t numElements, void* dstBFloat16Buffer)
std::vector<ITensorHandle*> m_Inputs
BFloat16* GetOutputTensorDataBFloat16(unsigned int idx, const PayloadType& data)
const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle) (brief: float32 helpers)
unsigned int GetNumElements() const (Definition: Tensor.hpp:192)
-- cgit v1.2.1