ArmNN
 21.02
NeonWorkloadUtils.hpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #pragma once
6 
10 #include <neon/NeonTimer.hpp>
12 
13 #include <armnn/Utils.hpp>
14 
15 #include <Half.hpp>
16 
17 #define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
18  ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
19  name, \
20  armnn::NeonTimer(), \
21  armnn::WallClockTimer())
22 
23 using namespace armnn::armcomputetensorutils;
24 
25 namespace armnn
26 {
27 
/// Initialise the backing storage of an ACL tensor and fill it from a raw
/// typed buffer.
///
/// @param dstTensor  ACL NEON tensor; its memory is allocated here before the copy.
/// @param srcData    Pointer to source elements; must hold at least as many
///                   elements of T as dstTensor's shape requires — TODO confirm
///                   callers guarantee this (no size is passed in).
template <typename T>
void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
{
    // Allocate dstTensor's buffer first — copying into an unallocated ACL
    // tensor would dereference a null buffer.
    InitialiseArmComputeTensorEmpty(dstTensor);
    CopyArmComputeITensorData(srcData, dstTensor);
}
34 
35 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
36  const ConstCpuTensorHandle* handle)
37 {
38  ARMNN_ASSERT(handle);
39 
40  switch(handle->GetTensorInfo().GetDataType())
41  {
42  case DataType::Float16:
44  break;
45  case DataType::Float32:
46  CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
47  break;
48  case DataType::QAsymmU8:
49  CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
50  break;
54  case DataType::QSymmS8:
55  case DataType::QAsymmS8:
56  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
57  break;
59  case DataType::Signed32:
60  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
61  break;
62  case DataType::QSymmS16:
63  CopyArmComputeTensorData(tensor, handle->GetConstTensor<int16_t>());
64  break;
65  default:
66  ARMNN_ASSERT_MSG(false, "Unexpected tensor type.");
67  }
68 };
69 
70 inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
71  const std::vector<int>& m_end,
72  const std::vector<int>& m_stride)
73 {
77 
78  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
79 
80  for (unsigned int i = 0; i < num_dims; i++)
81  {
82  unsigned int revertedIndex = num_dims - i - 1;
83 
84  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
85  ends.set(i, static_cast<int>(m_end[revertedIndex]));
86  strides.set(i, static_cast<int>(m_stride[revertedIndex]));
87  }
88 
89  return std::make_tuple(starts, ends, strides);
90 }
91 
92 inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
93  const std::vector<unsigned int>& m_size)
94 {
95  // This function must translate the size vector given to an end vector
96  // expected by the ACL NESlice workload
99 
100  unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
101 
102  // For strided slices, we have the relationship size = (end - begin) / stride
103  // For slice, we assume stride to be a vector of all ones, yielding the formula
104  // size = (end - begin) therefore we know end = size + begin
105  for (unsigned int i = 0; i < num_dims; i++)
106  {
107  unsigned int revertedIndex = num_dims - i - 1;
108 
109  starts.set(i, static_cast<int>(m_begin[revertedIndex]));
110  ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
111  }
112 
113  return std::make_tuple(starts, ends);
114 }
115 
116 template <typename DataType, typename PayloadType>
117 DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
118 {
119  ITensorHandle* tensorHandle = data.m_Outputs[idx];
120  return reinterpret_cast<DataType*>(tensorHandle->Map());
121 }
122 
123 } //namespace armnn
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
const T * GetConstTensor() const
Copyright (c) 2021 ARM Limited and Contributors.
auto SetNeonStridedSliceData(const std::vector< int > &m_begin, const std::vector< int > &m_end, const std::vector< int > &m_stride)
DataType
Definition: Types.hpp:32
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
#define ARMNN_ASSERT_MSG(COND, MSG)
Definition: Assert.hpp:15
DataType GetDataType() const
Definition: Tensor.hpp:194
auto SetNeonSliceData(const std::vector< unsigned int > &m_begin, const std::vector< unsigned int > &m_size)
#define ARMNN_FALLTHROUGH
Definition: Utils.hpp:36
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
void InitializeArmComputeTensorData(arm_compute::Tensor &tensor, const ConstCpuTensorHandle *handle)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
void CopyArmComputeTensorData(arm_compute::Tensor &dstTensor, const T *srcData)
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
half_float::half Half
Definition: Half.hpp:16
const TensorInfo & GetTensorInfo() const