ArmNN
 23.02
NeonWorkloadUtils.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
10 #include <neon/NeonTimer.hpp>
12 
13 #include <armnn/Utils.hpp>
14 
15 #include <Half.hpp>
16 
/// Profiles the enclosing scope on the Neon (CpuAcc) backend, recording both a
/// NeonTimer and a WallClockTimer instrument, with no profiling GUID attached.
#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
23 
/// Same as ARMNN_SCOPED_PROFILING_EVENT_NEON, but tags the profiling event
/// with an explicit GUID so it can be correlated with external profiling data.
#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
30 
31 using namespace armnn::armcomputetensorutils;
32 
33 namespace armnn
34 {
35 
36 inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
37 {
38  switch (convolutionMethod)
39  {
40  case arm_compute::ConvolutionMethod::FFT:
41  return "FFT";
42  case arm_compute::ConvolutionMethod::DIRECT:
43  return "Direct";
44  case arm_compute::ConvolutionMethod::GEMM:
45  return "GEMM";
46  case arm_compute::ConvolutionMethod::WINOGRAD:
47  return "Winograd";
48  default:
49  return "Unknown";
50  }
51 }
52 
53 template <typename T>
54 void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
55 {
56  InitialiseArmComputeTensorEmpty(dstTensor);
57  CopyArmComputeITensorData(srcData, dstTensor);
58 }
59 
60 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
61  TensorInfo tensorInfo,
62  const ITensorHandle* handle)
63 {
64  ARMNN_ASSERT(handle);
65 
66  switch(tensorInfo.GetDataType())
67  {
68  case DataType::Float16:
69  CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::Half*>(handle->Map()));
70  break;
71  case DataType::Float32:
72  CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
73  break;
74  case DataType::QAsymmU8:
75  CopyArmComputeTensorData(tensor, reinterpret_cast<const uint8_t*>(handle->Map()));
76  break;
77  case DataType::QSymmS8:
78  case DataType::QAsymmS8:
79  CopyArmComputeTensorData(tensor, reinterpret_cast<const int8_t*>(handle->Map()));
80  break;
81  case DataType::Signed32:
82  CopyArmComputeTensorData(tensor, reinterpret_cast<const int32_t*>(handle->Map()));
83  break;
84  case DataType::QSymmS16:
85  CopyArmComputeTensorData(tensor, reinterpret_cast<const int16_t*>(handle->Map()));
86  break;
87  case DataType::BFloat16:
88  CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::BFloat16*>(handle->Map()));
89  break;
90  default:
91  // Throw exception; assertion not called in release build.
92  throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
93  }
94 };
95 
96 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
97  const ConstTensorHandle* handle)
98 {
99  ARMNN_ASSERT(handle);
100 
101  switch(handle->GetTensorInfo().GetDataType())
102  {
103  case DataType::Float16:
105  break;
106  case DataType::Float32:
107  CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
108  break;
109  case DataType::QAsymmU8:
110  CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
111  break;
112  case DataType::QSymmS8:
113  case DataType::QAsymmS8:
114  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
115  break;
116  case DataType::Signed32:
117  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
118  break;
119  case DataType::QSymmS16:
120  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
121  break;
122  case DataType::BFloat16:
124  break;
125  default:
126  // Throw exception; assertion not called in release build.
127  throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
128  }
129 };
130 
131 inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
132  const std::vector<int>& m_end,
133  const std::vector<int>& m_stride)
134 {
137  arm_compute::Coordinates strides;
138 
139  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
140 
141  for (unsigned int i = 0; i < num_dims; i++)
142  {
143  unsigned int revertedIndex = num_dims - i - 1;
144 
145  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
146  ends.set(i, static_cast<int>(m_end[revertedIndex]));
147  strides.set(i, static_cast<int>(m_stride[revertedIndex]));
148  }
149 
150  return std::make_tuple(starts, ends, strides);
151 }
152 
153 inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
154  const std::vector<unsigned int>& m_size)
155 {
156  // This function must translate the size vector given to an end vector
157  // expected by the ACL NESlice workload
160 
161  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
162 
163  // For strided slices, we have the relationship size = (end - begin) / stride
164  // For slice, we assume stride to be a vector of all ones, yielding the formula
165  // size = (end - begin) therefore we know end = size + begin
166  for (unsigned int i = 0; i < num_dims; i++)
167  {
168  unsigned int revertedIndex = num_dims - i - 1;
169 
170  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
171  ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
172  }
173 
174  return std::make_tuple(starts, ends);
175 }
176 
177 template <typename DataType, typename PayloadType>
178 DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
179 {
180  ITensorHandle* tensorHandle = data.m_Outputs[idx];
181  return reinterpret_cast<DataType*>(tensorHandle->Map());
182 }
183 
184 } //namespace armnn
armnn::DataType::QAsymmU8
@ QAsymmU8
Utils.hpp
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::DataType::Float16
@ Float16
armnn::DataType::Signed32
@ Signed32
armnn::ConstTensorHandle
Definition: TensorHandle.hpp:24
Workload.hpp
TensorHandle.hpp
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition: 01_00_quick_start.dox:6
armnn::InitializeArmComputeTensorData
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, TensorInfo tensorInfo, const ITensorHandle *handle)
Definition: NeonWorkloadUtils.hpp:60
armnn::ITensorHandle
Definition: ITensorHandle.hpp:15
armnn::SetNeonStridedSliceData
auto SetNeonStridedSliceData(const std::vector< int > &m_begin, const std::vector< int > &m_end, const std::vector< int > &m_stride)
Definition: NeonWorkloadUtils.hpp:131
armnn::ConstTensorHandle::GetConstTensor
const T * GetConstTensor() const
Definition: TensorHandle.hpp:28
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn::ITensorHandle::Map
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
ArmComputeTensorUtils.hpp
armnn::DataType::Float32
@ Float32
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::ConstTensorHandle::GetTensorInfo
const TensorInfo & GetTensorInfo() const
Definition: TensorHandle.hpp:40
armnn::GetOutputTensorData
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
Definition: ClWorkloadUtils.hpp:173
armnn::BFloat16
Definition: BFloat16.hpp:15
Half.hpp
armnn::DataType
DataType
Definition: Types.hpp:48
armnn::DataType::BFloat16
@ BFloat16
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
NeonTensorHandle.hpp
armnn::SetNeonSliceData
auto SetNeonSliceData(const std::vector< unsigned int > &m_begin, const std::vector< unsigned int > &m_size)
Definition: NeonWorkloadUtils.hpp:153
armnn::CopyArmComputeTensorData
void CopyArmComputeTensorData(arm_compute::Tensor &dstTensor, const T *srcData)
Definition: NeonWorkloadUtils.hpp:54
armnn::DataType::QSymmS8
@ QSymmS8
armnn::DataType::QSymmS16
@ QSymmS16
NeonTimer.hpp
armnn::GetConvolutionMethodString
std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod &convolutionMethod)
Definition: ClWorkloadUtils.hpp:38
armnn::Coordinates
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Definition: InternalTypes.hpp:15
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198