ArmNN 24.02
NeonWorkloadUtils.hpp — annotated source listing.
Go to the documentation of this file.
1 //
2 // Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
#include <armnn/backends/Workload.hpp>

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTimer.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <Half.hpp>
16 
17 #define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
18  ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
19  armnn::EmptyOptional(), \
20  name, \
21  armnn::NeonTimer(), \
22  armnn::WallClockTimer())
23 
24 #define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
25  ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
26  guid, \
27  GetName() + "_" + name, \
28  armnn::NeonTimer(), \
29  armnn::WallClockTimer())
30 
31 /// Creates a profiling event that uses GetGuid() and GetName() from the calling class
32 #define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label) \
33  ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
34  this->GetGuid(), \
35  this->GetName() + "_" + label, \
36  armnn::NeonTimer(), \
37  armnn::WallClockTimer())
38 
39 using namespace armnn::armcomputetensorutils;
40 
41 namespace armnn
42 {
43 
44 inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
45 {
46  switch (convolutionMethod)
47  {
48  case arm_compute::ConvolutionMethod::FFT:
49  return "FFT";
50  case arm_compute::ConvolutionMethod::DIRECT:
51  return "Direct";
52  case arm_compute::ConvolutionMethod::GEMM:
53  return "GEMM";
54  case arm_compute::ConvolutionMethod::WINOGRAD:
55  return "Winograd";
56  default:
57  return "Unknown";
58  }
59 }
60 
61 template <typename T>
62 void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
63 {
64  InitialiseArmComputeTensorEmpty(dstTensor);
65  CopyArmComputeITensorData(srcData, dstTensor);
66 }
67 
68 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
69  TensorInfo tensorInfo,
70  const ITensorHandle* handle)
71 {
72  ARMNN_ASSERT(handle);
73 
74  switch(tensorInfo.GetDataType())
75  {
76  case DataType::Float16:
77  CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::Half*>(handle->Map()));
78  break;
79  case DataType::Float32:
80  CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
81  break;
82  case DataType::QAsymmU8:
83  CopyArmComputeTensorData(tensor, reinterpret_cast<const uint8_t*>(handle->Map()));
84  break;
85  case DataType::QSymmS8:
86  case DataType::QAsymmS8:
87  CopyArmComputeTensorData(tensor, reinterpret_cast<const int8_t*>(handle->Map()));
88  break;
89  case DataType::Signed32:
90  CopyArmComputeTensorData(tensor, reinterpret_cast<const int32_t*>(handle->Map()));
91  break;
92  case DataType::QSymmS16:
93  CopyArmComputeTensorData(tensor, reinterpret_cast<const int16_t*>(handle->Map()));
94  break;
95  case DataType::BFloat16:
96  CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::BFloat16*>(handle->Map()));
97  break;
98  default:
99  // Throw exception; assertion not called in release build.
100  throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
101  }
102 };
103 
104 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
105  const ConstTensorHandle* handle)
106 {
107  ARMNN_ASSERT(handle);
108 
109  switch(handle->GetTensorInfo().GetDataType())
110  {
111  case DataType::Float16:
113  break;
114  case DataType::Float32:
115  CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
116  break;
117  case DataType::QAsymmU8:
118  CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
119  break;
120  case DataType::QSymmS8:
121  case DataType::QAsymmS8:
122  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
123  break;
124  case DataType::Signed32:
125  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
126  break;
127  case DataType::QSymmS16:
128  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
129  break;
130  case DataType::BFloat16:
132  break;
133  default:
134  // Throw exception; assertion not called in release build.
135  throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
136  }
137 };
138 
139 inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
140  const std::vector<int>& m_end,
141  const std::vector<int>& m_stride)
142 {
145  arm_compute::Coordinates strides;
146 
147  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
148 
149  for (unsigned int i = 0; i < num_dims; i++)
150  {
151  unsigned int revertedIndex = num_dims - i - 1;
152 
153  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
154  ends.set(i, static_cast<int>(m_end[revertedIndex]));
155  strides.set(i, static_cast<int>(m_stride[revertedIndex]));
156  }
157 
158  return std::make_tuple(starts, ends, strides);
159 }
160 
161 inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
162  const std::vector<unsigned int>& m_size)
163 {
164  // This function must translate the size vector given to an end vector
165  // expected by the ACL NESlice workload
168 
169  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
170 
171  // For strided slices, we have the relationship size = (end - begin) / stride
172  // For slice, we assume stride to be a vector of all ones, yielding the formula
173  // size = (end - begin) therefore we know end = size + begin
174  for (unsigned int i = 0; i < num_dims; i++)
175  {
176  unsigned int revertedIndex = num_dims - i - 1;
177 
178  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
179  ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
180  }
181 
182  return std::make_tuple(starts, ends);
183 }
184 
/// Maps the output tensor handle at index @p idx of the workload payload and
/// returns its storage reinterpreted as DataType*.
///
/// @tparam DataType    Element type the caller will treat the storage as.
/// @tparam PayloadType Workload data type exposing an m_Outputs container.
/// @param idx  Index into data.m_Outputs.
/// @param data Workload payload holding the output tensor handles.
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    return reinterpret_cast<DataType*>(data.m_Outputs[idx]->Map());
}
191 
192 } //namespace armnn
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::ConstTensorHandle
Definition: TensorHandle.hpp:24
armnn::GetOutputTensorData
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
Definition: ClWorkloadUtils.hpp:181
armnn::TensorInfo
Definition: Tensor.hpp:152
NeonTensorHandle.hpp
armnn::SetNeonStridedSliceData
auto SetNeonStridedSliceData(const std::vector< int > &m_begin, const std::vector< int > &m_end, const std::vector< int > &m_stride)
Definition: NeonWorkloadUtils.hpp:139
armnn::DataType::Float32
@ Float32
armnn::ITensorHandle
Definition: ITensorHandle.hpp:16
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::ConstTensorHandle::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Definition: TensorHandle.hpp:40
armnn::DataType::QSymmS8
@ QSymmS8
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::DataType::QSymmS16
@ QSymmS16
armnn::DataType::BFloat16
@ BFloat16
armnn::InitializeArmComputeTensorData
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)
Definition: NeonWorkloadUtils.hpp:68
armnn::ConstTensorHandle::GetConstTensor
const T * GetConstTensor() const
Definition: TensorHandle.hpp:28
armnn::DataType::Float16
@ Float16
Utils.hpp
armnn::DataType
DataType
Definition: Types.hpp:48
Workload.hpp
armnn::GetConvolutionMethodString
std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod &convolutionMethod)
Definition: ClWorkloadUtils.hpp:46
armnn::SetNeonSliceData
auto SetNeonSliceData(const std::vector< unsigned int > &m_begin, const std::vector< unsigned int > &m_size)
Definition: NeonWorkloadUtils.hpp:161
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn::DataType::Signed32
@ Signed32
armnn::DataType::QAsymmS8
@ QAsymmS8
Half.hpp
TensorHandle.hpp
armnn::BFloat16
Definition: BFloat16.hpp:15
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
ArmComputeTensorUtils.hpp
NeonTimer.hpp
armnn::CopyArmComputeTensorData
void CopyArmComputeTensorData(arm_compute::Tensor &dstTensor, const T *srcData)
Definition: NeonWorkloadUtils.hpp:62
armnn::ITensorHandle::Map
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.