From ae050524109f1ce827962665436ef7430f2ac479 Mon Sep 17 00:00:00 2001
From: David Monahan
Date: Wed, 22 Mar 2023 16:48:58 +0000
Subject: IVGCVSW-7255 Update Doxygen Documentation and publish on GitHub.

* Updating Doxygen documentation for the 23.02 release.

Signed-off-by: David Monahan
Change-Id: I545574ff7664b4595d2fe6a91a3c35d2ad55df82
---
 23.02/_neon_workload_utils_8hpp_source.xhtml | 278 ++++++++++++++++++++++-----
 1 file changed, 232 insertions(+), 46 deletions(-)

(limited to '23.02/_neon_workload_utils_8hpp_source.xhtml')

diff --git a/23.02/_neon_workload_utils_8hpp_source.xhtml b/23.02/_neon_workload_utils_8hpp_source.xhtml
index 6107268804..3718d5595f 100644
--- a/23.02/_neon_workload_utils_8hpp_source.xhtml
+++ b/23.02/_neon_workload_utils_8hpp_source.xhtml
NeonWorkloadUtils.hpp
//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/backends/Workload.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTimer.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <Half.hpp>

#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())

#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())
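
// Usage sketch (illustrative only; NeonExampleWorkload and m_ExampleLayer are
// hypothetical names, not part of this header): a workload's Execute() opens
// with the scoped event so that the Neon kernel timer and a wall-clock timer
// both cover the run of the underlying arm_compute function.
//
//     void NeonExampleWorkload::Execute() const
//     {
//         ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonExampleWorkloadExecute", GetGuid());
//         m_ExampleLayer.run();
//     }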

using namespace armnn::armcomputetensorutils;

namespace armnn
{

inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
{
    switch (convolutionMethod)
    {
        case arm_compute::ConvolutionMethod::FFT:
            return "FFT";
        case arm_compute::ConvolutionMethod::DIRECT:
            return "Direct";
        case arm_compute::ConvolutionMethod::GEMM:
            return "GEMM";
        case arm_compute::ConvolutionMethod::WINOGRAD:
            return "Winograd";
        default:
            return "Unknown";
    }
}

template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}

inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                           TensorInfo tensorInfo,
                                           const ITensorHandle* handle)
{
    ARMNN_ASSERT(handle);

    switch(tensorInfo.GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::Half*>(handle->Map()));
            break;
        case DataType::Float32:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const float*>(handle->Map()));
            break;
        case DataType::QAsymmU8:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const uint8_t*>(handle->Map()));
            break;
        case DataType::QSymmS8:
        case DataType::QAsymmS8:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const int8_t*>(handle->Map()));
            break;
        case DataType::Signed32:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const int32_t*>(handle->Map()));
            break;
        case DataType::QSymmS16:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const int16_t*>(handle->Map()));
            break;
        case DataType::BFloat16:
            CopyArmComputeTensorData(tensor, reinterpret_cast<const armnn::BFloat16*>(handle->Map()));
            break;
        default:
            // Throw exception; assertion not called in release build.
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
    }
}

inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                           const ConstTensorHandle* handle)
{
    ARMNN_ASSERT(handle);

    switch(handle->GetTensorInfo().GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
            break;
        case DataType::Float32:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
            break;
        case DataType::QAsymmU8:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
            break;
        case DataType::QSymmS8:
        case DataType::QAsymmS8:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
            break;
        case DataType::Signed32:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
            break;
        case DataType::QSymmS16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
            break;
        case DataType::BFloat16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::BFloat16>());
            break;
        default:
            // Throw exception; assertion not called in release build.
            throw Exception("Unexpected tensor type during InitializeArmComputeTensorData().");
    }
}
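
// Usage sketch (illustrative; weightHandle is a hypothetical
// ConstTensorHandle*): a workload constructor typically builds the ACL
// tensor from the handle's TensorInfo and then lets the overload above
// dispatch the copy on the handle's data type.
//
//     arm_compute::Tensor weights;
//     BuildArmComputeTensor(weights, weightHandle->GetTensorInfo());
//     InitializeArmComputeTensorData(weights, weightHandle);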

inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
                                    const std::vector<int>& m_end,
                                    const std::vector<int>& m_stride)
{
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;
    arm_compute::Coordinates strides;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_end[revertedIndex]));
        strides.set(i, static_cast<int>(m_stride[revertedIndex]));
    }

    return std::make_tuple(starts, ends, strides);
}
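
// Worked example (illustrative): ArmNN passes dimensions outermost-first,
// while arm_compute::Coordinates index dimension 0 as the innermost, hence
// the reversal above. For begin = {0, 1, 2}, end = {1, 3, 4},
// stride = {1, 1, 2}:
//
//     auto [starts, ends, strides] = SetNeonStridedSliceData({0, 1, 2}, {1, 3, 4}, {1, 1, 2});
//     // starts  == (2, 1, 0)
//     // ends    == (4, 3, 1)
//     // strides == (2, 1, 1)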

inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
                             const std::vector<unsigned int>& m_size)
{
    // This function must translate the size vector given to an end vector
    // expected by the ACL NESlice workload.
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // For strided slices, we have the relationship size = (end - begin) / stride.
    // For slice, we assume stride to be a vector of all ones, yielding the formula
    // size = (end - begin); therefore we know end = size + begin.
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}
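
// Worked example (illustrative): with all-ones strides each end coordinate
// is simply begin + size, again written innermost-first. For
// begin = {0, 2} and size = {2, 3}:
//
//     auto [starts, ends] = SetNeonSliceData({0, 2}, {2, 3});
//     // starts == (2, 0)
//     // ends   == (5, 2)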

template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}
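
// Usage sketch (illustrative; m_Data is the workload's queue descriptor
// payload holding the output tensor handles):
//
//     float* outputData = GetOutputTensorData<float>(0, m_Data);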

} //namespace armnn
-- cgit v1.2.1