ArmNN
 23.05
NeonConvertFp16ToFp32Workload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
9 
10 #include <Half.hpp>
11 
13 
// Saturate out-of-range values when casting; passed to NECast::validate/configure.
14 static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
15 
16 namespace armnn
17 {
18 
20 {
21  // Fallback to portable software implementation if Compute Library NECast won't work, so
22  // this method always returns success
23 
24  armnn::IgnoreUnused(input);
25  armnn::IgnoreUnused(output);
26  return arm_compute::Status();
27 }
28 
// NOTE(review): the extracted listing is missing the constructor's first
// signature line and the base-class initializer (original source lines 29 and
// 31) — restore them from NeonConvertFp16ToFp32Workload.hpp before compiling.
30  const WorkloadInfo& info)
32 {
// This workload expects exactly one input and one output tensor.
33  this->m_Data.ValidateInputsOutputs("NeonConvertFp16ToFp32Workload", 1, 1);
34 
// Unwrap the backend tensor handles to the underlying ACL tensors.
35  arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
36  arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
37 
// Decide at construction time between the hardware path (NECast) and the
// software fallback; Execute() checks m_Cast to pick the path.
38  if (arm_compute::NECast::validate(input.info(), output.info(), g_AclConvertPolicy))
39  {
40  // Use NECast if supported (needs hardware support for FP16)
41  m_Cast.reset(new arm_compute::NECast());
42  m_Cast->configure(&input, &output, g_AclConvertPolicy);
43  }
44  else
45  {
46  // Else use software implementation using Half.hpp
// Record input/output handle pairs for the element-wise copy in Execute().
47  GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
48  }
49 }
50 
52 {
53  ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp16ToFp32Workload_Execute", this->GetGuid());
54 
55  if (m_Cast)
56  {
57  // Use NECast if supported and initialised
58  m_Cast->run();
59  }
60  else
61  {
62  // Else use softare implementation using Half.hpp
63  auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
64  {
65  auto input = reinterpret_cast<const Half*>(src);
66  auto output = reinterpret_cast<float*>(dst);
67  size_t numElements = size/2; // 2 bytes per fp16
69  };
70 
71  for (const auto& pair : m_TensorHandlePairs)
72  {
73  CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
74  }
75  }
76 }
77 
79 {
80  ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
81  this->m_Data.m_Inputs[slot] = tensorHandle;
82  try
83  {
84  Reconfigure();
85  }
87  {
88  // Cannot reconfigure, revert the slot back and throw the exception.
89  this->m_Data.m_Inputs[slot] = backupHandle;
90  throw e;
91  }
92 }
93 
94 // Replace output tensor handle with the given TensorHandle
96 {
97  ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
98  this->m_Data.m_Inputs[slot] = tensorHandle;
99  try
100  {
101  Reconfigure();
102  }
104  {
105  // Cannot reconfigure, revert the slot back and throw the exception.
106  this->m_Data.m_Inputs[slot] = backupHandle;
107  throw e;
108  }
109 }
110 
111 void NeonConvertFp16ToFp32Workload::Reconfigure()
112 {
113  throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
114 }
115 
116 } //namespace armnn
armnn::BaseWorkload::GetGuid
arm::pipe::ProfilingGuid GetGuid() const final
Definition: Workload.hpp:61
armnn::QueueDescriptor::ValidateInputsOutputs
void ValidateInputsOutputs(const std::string &descName, unsigned int numExpectedIn, unsigned int numExpectedOut) const
Definition: WorkloadData.cpp:472
NeonConvertFp16ToFp32Workload.hpp
armnn::NeonConvertFp16ToFp32WorkloadValidate
arm_compute::Status NeonConvertFp16ToFp32WorkloadValidate(const TensorInfo &input, const TensorInfo &output)
Definition: NeonConvertFp16ToFp32Workload.cpp:19
armnn::IgnoreUnused
void IgnoreUnused(Ts &&...)
Definition: IgnoreUnused.hpp:14
armnn::BaseWorkload::m_Data
QueueDescriptor m_Data
Definition: Workload.hpp:83
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::GatherTensorHandlePairs
void GatherTensorHandlePairs(const DescriptorType &descriptor, std::vector< std::pair< SrcTensorHandleType *, DstTensorHandleType * >> &tensorHandlePairs)
Definition: WorkloadUtils.hpp:204
armnn::ITensorHandle
Definition: ITensorHandle.hpp:15
armnn::UnimplementedException
Definition: Exceptions.hpp:98
armnn::Half
half_float::half Half
Definition: Half.hpp:22
ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid)
Definition: NeonWorkloadUtils.hpp:24
armnn::NeonConvertFp16ToFp32Workload::ReplaceOutputTensorHandle
void ReplaceOutputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
Definition: NeonConvertFp16ToFp32Workload.cpp:95
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::NeonConvertFp16ToFp32Workload::Execute
virtual void Execute() const override
Definition: NeonConvertFp16ToFp32Workload.cpp:51
armnn::ConvertFp16ToFp32QueueDescriptor
Definition: WorkloadData.hpp:447
armnn::Status
Status
Definition: Types.hpp:42
Half.hpp
armnn::NeonConvertFp16ToFp32Workload::NeonConvertFp16ToFp32Workload
NeonConvertFp16ToFp32Workload(const ConvertFp16ToFp32QueueDescriptor &descriptor, const WorkloadInfo &info)
Definition: NeonConvertFp16ToFp32Workload.cpp:29
armnn::WorkloadInfo
Contains information about TensorInfos of a layer.
Definition: WorkloadInfo.hpp:16
FloatingPointConverter.hpp
armnn::NeonConvertFp16ToFp32Workload::ReplaceInputTensorHandle
void ReplaceInputTensorHandle(ITensorHandle *tensorHandle, unsigned int slot) override
Definition: NeonConvertFp16ToFp32Workload.cpp:78
armnn::MultiTypedWorkload
Definition: Workload.hpp:149
armnn::CopyTensorContentsGeneric
void CopyTensorContentsGeneric(const ITensorHandle *srcTensor, ITensorHandle *dstTensor, CopyFunc copy)
Definition: WorkloadUtils.hpp:46
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition: WorkloadData.hpp:27
WorkloadUtils.hpp
armnnUtils::FloatingPointConverter::ConvertFloat16To32
static void ConvertFloat16To32(const void *srcFloat16Buffer, size_t numElements, float *dstFloat32Buffer)
Definition: FloatingPointConverter.cpp:31
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition: WorkloadData.hpp:26
armnn::BoostLogSeverityMapping::info
@ info