From f4019872c1134c6fcc1d6993e5746f55c1e79208 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Tue, 8 Mar 2022 20:01:38 +0000
Subject: IVGCVSW-6819 Fix the directory structure and broken link to latest docu

Signed-off-by: Nikhil Raj
Change-Id: I05b559d15faf92c76ff536719693b361316be4f3
---
 22.02/_neon_workload_utils_8hpp_source.xhtml | 146 +++++++++++++++++++++++++++
 1 file changed, 146 insertions(+)
 create mode 100644 22.02/_neon_workload_utils_8hpp_source.xhtml

(limited to '22.02/_neon_workload_utils_8hpp_source.xhtml')

diff --git a/22.02/_neon_workload_utils_8hpp_source.xhtml b/22.02/_neon_workload_utils_8hpp_source.xhtml
new file mode 100644
index 0000000000..3353dfcae9
--- /dev/null
+++ b/22.02/_neon_workload_utils_8hpp_source.xhtml
@@ -0,0 +1,146 @@
ArmNN: src/backends/neon/workloads/NeonWorkloadUtils.hpp Source File
NeonWorkloadUtils.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/backends/Workload.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <neon/NeonTensorHandle.hpp>
#include <neon/NeonTimer.hpp>
#include <armnn/backends/TensorHandle.hpp>

#include <armnn/Utils.hpp>

#include <Half.hpp>

#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  armnn::EmptyOptional(), \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())

#define ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID(name, guid) \
    ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
                                                  guid, \
                                                  name, \
                                                  armnn::NeonTimer(), \
                                                  armnn::WallClockTimer())

using namespace armnn::armcomputetensorutils;

namespace armnn
{

inline std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod& convolutionMethod)
{
    switch (convolutionMethod)
    {
        case arm_compute::ConvolutionMethod::FFT:
            return "FFT";
        case arm_compute::ConvolutionMethod::DIRECT:
            return "Direct";
        case arm_compute::ConvolutionMethod::GEMM:
            return "GEMM";
        case arm_compute::ConvolutionMethod::WINOGRAD:
            return "Winograd";
        default:
            return "Unknown";
    }
}

template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}

inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                           const ConstTensorHandle* handle)
{
    ARMNN_ASSERT(handle);

    switch(handle->GetTensorInfo().GetDataType())
    {
        case DataType::Float16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
            break;
        case DataType::Float32:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
            break;
        case DataType::QAsymmU8:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
            break;
        case DataType::QSymmS8:
        case DataType::QAsymmS8:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
            break;
        case DataType::Signed32:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
            break;
        case DataType::QSymmS16:
            CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
            break;
        default:
            ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
    }
}
90 
91 inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
92  const std::vector<int>& m_end,
93  const std::vector<int>& m_stride)
94 {
98 
99  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
100 
101  for (unsigned int i = 0; i < num_dims; i++)
102  {
103  unsigned int revertedIndex = num_dims - i - 1;
104 
105  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
106  ends.set(i, static_cast<int>(m_end[revertedIndex]));
107  strides.set(i, static_cast<int>(m_stride[revertedIndex]));
108  }
109 
110  return std::make_tuple(starts, ends, strides);
111 }
112 
inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
                             const std::vector<unsigned int>& m_size)
{
    // This function must translate the size vector given to an end vector
    // expected by the ACL NESlice workload.
    arm_compute::Coordinates starts;
    arm_compute::Coordinates ends;

    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());

    // For strided slices, we have the relationship size = (end - begin) / stride.
    // For slice, we assume the stride to be a vector of all ones, yielding the formula
    // size = (end - begin), and therefore end = begin + size (a worked example
    // follows this function).
    for (unsigned int i = 0; i < num_dims; i++)
    {
        unsigned int revertedIndex = num_dims - i - 1;

        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
    }

    return std::make_tuple(starts, ends);
}
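// Worked example (illustrative addition, not part of the original header): for a
// 2-D slice with m_begin = {1, 0} and m_size = {2, 3}, the loop above reverses the
// dimension order and produces starts = (0, 1) and ends = (3, 3), i.e. each end
// coordinate is begin + size, which is what NESlice expects when every stride is 1.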

template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    ITensorHandle* tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}

} //namespace armnn
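For readers of this page, the following is a minimal usage sketch showing how the helpers above are typically combined inside a Neon workload. The class NeonExampleSliceWorkload, its members, and the layer wiring are hypothetical and only illustrate the call pattern; they are not part of NeonWorkloadUtils.hpp or of any real ArmNN workload.

// Illustrative sketch only - not part of the patched file.
#include "NeonWorkloadUtils.hpp"

#include <tuple>
#include <vector>

namespace armnn
{

class NeonExampleSliceWorkload
{
public:
    NeonExampleSliceWorkload(const std::vector<int>& begin,
                             const std::vector<int>& end,
                             const std::vector<int>& stride)
    {
        // Convert ArmNN begin/end/stride vectors into the reversed
        // arm_compute::Coordinates that ACL expects.
        arm_compute::Coordinates starts;
        arm_compute::Coordinates ends;
        arm_compute::Coordinates strides;
        std::tie(starts, ends, strides) = SetNeonStridedSliceData(begin, end, stride);
        // A real workload would now configure its ACL function,
        // e.g. m_Layer.configure(&input, &output, starts, ends, strides).
    }

    void Execute() const
    {
        // Records both NEON kernel timings and wall-clock time for this scope.
        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonExampleSliceWorkload_Execute");
        // A real workload would call m_Layer.run() here.
    }
};

} // namespace armnn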
-- cgit v1.2.1