15 #include <arm_compute/runtime/CL/CLTensor.h> 16 #include <arm_compute/runtime/IFunction.h> 20 #define ARMNN_SCOPED_PROFILING_EVENT_CL(name) \ 21 ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \ 22 armnn::EmptyOptional(), \ 24 armnn::OpenClTimer(), \ 25 armnn::WallClockTimer()) 27 #define ARMNN_SCOPED_PROFILING_EVENT_CL_GUID(name, guid) \ 28 ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::GpuAcc, \ 31 armnn::OpenClTimer(), \ 32 armnn::WallClockTimer()) 39 switch (convolutionMethod)
41 case arm_compute::ConvolutionMethod::FFT:
43 case arm_compute::ConvolutionMethod::DIRECT:
45 case arm_compute::ConvolutionMethod::GEMM:
47 case arm_compute::ConvolutionMethod::WINOGRAD:
// Copy host-side data into the CL tensor via the shared arm_compute tensor
// utilities. NOTE(review): fragment — only this call from
// CopyArmComputeClTensorData (see signature index) is visible here; the
// surrounding map/unmap of dstTensor is outside this view.
64 armcomputetensorutils::CopyArmComputeITensorData<T>(srcData, dstTensor);
71 const std::vector<int>& m_end,
72 const std::vector<int>& m_stride)
78 unsigned int num_dims =
static_cast<unsigned int>(m_begin.size());
80 for (
unsigned int i = 0; i < num_dims; i++) {
81 unsigned int revertedIndex = num_dims - i - 1;
83 starts.set(i, static_cast<int>(m_begin[revertedIndex]));
84 ends.set(i, static_cast<int>(m_end[revertedIndex]));
85 strides.set(i, static_cast<int>(m_stride[revertedIndex]));
88 return std::make_tuple(starts, ends, strides);
92 const std::vector<unsigned int>& m_size)
99 unsigned int num_dims =
static_cast<unsigned int>(m_begin.size());
104 for (
unsigned int i = 0; i < num_dims; i++)
106 unsigned int revertedIndex = num_dims - i - 1;
108 starts.set(i, static_cast<int>(m_begin[revertedIndex]));
109 ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
112 return std::make_tuple(starts, ends);
// Allocate backing storage for the CL tensor without copying any data yet.
// NOTE(review): fragment — only this call from InitializeArmComputeClTensorData
// (see signature index) is visible; the subsequent copy from the
// ConstTensorHandle is outside this view.
120 armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
153 std::stringstream message;
154 message <<
"CL error: " << clError.
what() <<
". Error code: " << clError.err();
/// Map output tensor `idx` of a workload payload and return a typed pointer
/// to its data.
///
/// @tparam DataType    element type to view the mapped buffer as
/// @tparam PayloadType queue-descriptor-like type exposing m_Outputs
/// @param idx  index into the payload's output tensor handles
/// @param data the workload payload
/// @return pointer to the mapped tensor data, cast to DataType*
template <typename DataType, typename PayloadType>
DataType* GetOutputTensorData(unsigned int idx, const PayloadType& data)
{
    // NOTE(review): the handle-lookup line was dropped by the extraction;
    // reconstructed from the armnn queue-descriptor convention (m_Outputs) —
    // confirm against upstream.
    auto tensorHandle = data.m_Outputs[idx];
    return reinterpret_cast<DataType*>(tensorHandle->Map());
}
std::string GetConvolutionMethodString(arm_compute::ConvolutionMethod &convolutionMethod)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_SCOPED_PROFILING_EVENT_CL(name)
void RunClFunction(arm_compute::IFunction &function, const CheckLocation &location)
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
virtual const char * what() const noexcept override
Copyright (c) 2021 ARM Limited and Contributors.
const TensorInfo & GetTensorInfo() const
#define ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_ASSERT_MSG(COND, MSG)
DataType GetDataType() const
#define ARMNN_FALLTHROUGH
RuntimeException WrapClError(const cl::Error &clError, const CheckLocation &location)
#define ARMNN_ASSERT(COND)
auto SetClSliceData(const std::vector< unsigned int > &m_begin, const std::vector< unsigned int > &m_size)
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
void CopyArmComputeClTensorData(arm_compute::CLTensor &dstTensor, const T *srcData)
DataType * GetOutputTensorData(unsigned int idx, const PayloadType &data)
void InitializeArmComputeClTensorData(arm_compute::CLTensor &clTensor, const ConstTensorHandle *handle)
auto SetClStridedSliceData(const std::vector< int > &m_begin, const std::vector< int > &m_end, const std::vector< int > &m_stride)
const T * GetConstTensor() const