ArmNN 22.11
RefConvertFp32ToBf16Workload.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefConvertFp32ToBf16Workload.hpp"
#include "RefWorkloadUtils.hpp"

#include <armnnUtils/FloatingPointConverter.hpp>

#include <BFloat16.hpp>

namespace armnn
{

// Synchronous path: run on the inputs/outputs held by the queue descriptor.
void RefConvertFp32ToBf16Workload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

// Asynchronous path: run on the tensor handles in the per-execution working memory.
void RefConvertFp32ToBf16Workload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

void RefConvertFp32ToBf16Workload::Execute(std::vector<ITensorHandle*> inputs,
                                           std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvertFp32ToBf16Workload_Execute");

    // Map the float32 input and BFloat16 output buffers, then convert element-wise.
    const float* const input = reinterpret_cast<const float*>(inputs[0]->Map());
    BFloat16* const output = reinterpret_cast<BFloat16*>(outputs[0]->Map());

    unsigned int numElements = GetTensorInfo(inputs[0]).GetNumElements();
    armnnUtils::FloatingPointConverter::ConvertFloat32ToBFloat16(input, numElements, output);
}

} //namespace armnn
Referenced symbols (Doxygen cross-references for this listing):
- Compute::CpuRef: CPU execution using the reference C++ kernels.
- void ExecuteAsync(ExecutionData& executionData) override
- ARMNN_SCOPED_PROFILING_EVENT(backendId, name), defined in Profiling.hpp:220
- QueueDescriptor m_Data, defined in Workload.hpp:83
- static void ConvertFloat32ToBFloat16(const float* srcFloat32Buffer, size_t numElements, void* dstBFloat16Buffer), one of the float32 helpers
- std::vector<ITensorHandle*> m_Inputs, m_Outputs
- unsigned int GetNumElements() const, defined in Tensor.hpp:196
- const TensorInfo& GetTensorInfo(const ITensorHandle* tensorHandle)
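As an aside for readers of this listing, the sketch below illustrates the kind of element-wise narrowing that ConvertFloat32ToBFloat16 performs: reducing an IEEE-754 float32 to its 16 most significant bits (sign, 8-bit exponent, 7-bit mantissa) with round-to-nearest. This is an illustration only, not the body of armnnUtils::FloatingPointConverter; the helper names Fp32ToBf16Bits and ConvertBuffer are invented for the example, and NaN payloads are not handled specially.

#include <cstdint>
#include <cstring>
#include <cstddef>

// Illustrative sketch only, not the ArmNN implementation. Converts one IEEE-754
// float32 to a bfloat16 bit pattern by keeping the sign, the 8 exponent bits and
// the top 7 mantissa bits, rounding to nearest. NaN payloads are not preserved
// carefully here; a production converter would special-case them.
uint16_t Fp32ToBf16Bits(float value)
{
    uint32_t bits = 0;
    std::memcpy(&bits, &value, sizeof(bits));           // type-pun safely via memcpy
    uint32_t rounding = 0x7FFFu + ((bits >> 16) & 1u);  // round to nearest, ties to even
    return static_cast<uint16_t>((bits + rounding) >> 16);
}

// Buffer-level loop with the same shape as the workload's Execute(): read float32
// elements, write 16-bit bfloat16 elements.
void ConvertBuffer(const float* src, std::size_t numElements, uint16_t* dst)
{
    for (std::size_t i = 0; i < numElements; ++i)
    {
        dst[i] = Fp32ToBf16Bits(src[i]);
    }
}

For example, Fp32ToBf16Bits(1.0f) yields 0x3F80, the bfloat16 encoding of 1.0; the mapped input and output pointers in the workload above play the roles of src and dst.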