/// Assigns values from 'array' to a list of assignees, reading the array
/// back-to-front: the first assignee receives element (num - 1), the second
/// element (num - 2), and so on. Assignees beyond 'num' are left untouched.
///
/// NOTE(review): the source listing this was extracted from elided the braces
/// and the bounds-check/increment lines (original line numbers jump 25 -> 32);
/// they are reconstructed here.

/// Base case: assign a single value.
/// @param num   number of valid entries in 'array'
/// @param idx   running index; incremented after a successful assignment
/// @param array source container, indexable with operator[]
/// @param arg   destination for the selected element
template <typename ArrayType, typename Arg>
void AssignValues(unsigned int num, unsigned int& idx, const ArrayType& array, Arg& arg)
{
    // More assignees than array entries: leave 'arg' at its initial value.
    if (idx >= num)
    {
        return;
    }

    arg = array[(num - 1) - idx];
    idx++;
}

/// Variadic case: assign one value per assignee, recursing over the pack.
/// 'idx' is taken by value here; the base case above takes it by reference
/// and increments it, so the recursive call below sees the advanced index.
template <typename T, typename ArrayType, typename... Args>
void AssignValues(unsigned int num, unsigned int idx, const ArrayType& array, T& assignee, Args&... args)
{
    AssignValues(num, idx, array, assignee);

    AssignValues(num, idx, array, args...);
}
// NOTE(review): this span is a Doxygen-extracted fragment of
//   CopyTensorContentsGeneric(const ITensorHandle* srcTensor,
//                             ITensorHandle* dstTensor, CopyFunc copy)
// (prototype per the tooltip text later in this file). The listing elided
// the signature line, all braces and several declarations/calls (original
// line numbers jump, e.g. 80 -> 91), so the code below is not compilable
// as-is; only comments are added here.
46 template <
typename CopyFunc>
// Total byte size of each tensor (outermost stride * outermost dimension).
// Used only by the ARMNN_ASSERT bounds checks in the copy loop below.
55 const auto srcSize = srcTensor->
GetStrides()[0] * srcShape[0];
59 const auto dstSize = dstTensor->
GetStrides()[0] * dstShape[0];
// Per-dimension sizes default to 1, so dimensions the tensor does not have
// act as singletons. (Declarations of srcDepth/srcHeight/srcWidth and the
// AssignValues call that presumably fills these were elided -- TODO confirm
// against the full source.)
63 size_t srcBatches = 1;
66 size_t srcChannels = 1;
// Per-dimension strides; 0 until populated (the populating AssignValues
// call was elided by the listing).
76 size_t srcDepthStride = 0;
77 size_t srcBatchStride = 0;
78 size_t srcHeightStride = 0;
79 size_t srcWidthStride = 0;
80 size_t srcChannelStride = 0;
// Same defaults for the destination tensor.
91 size_t dstBatches = 1;
94 size_t dstChannels = 1;
104 size_t dstDepthStride = 0;
105 size_t dstBatchStride = 0;
106 size_t dstHeightStride = 0;
107 size_t dstWidthStride = 0;
108 size_t dstChannelStride = 0;
// Map both tensors to raw byte pointers (unsigned char), so all pointer
// arithmetic below is in bytes.
118 const unsigned char* srcDataStart;
119 unsigned char* dstDataStart;
122 srcDataStart =
static_cast<const uint8_t*
>(srcTensor->
Map());
123 dstDataStart =
static_cast<uint8_t*
>(dstTensor->
Map());
// Copy only the overlap of the two tensors: per-dimension minima, and for
// the innermost run the smaller of the two contiguous byte lengths.
126 size_t copyLength = std::min(srcChannels * srcChannelStride, dstChannels * dstChannelStride);
127 size_t copyWidth = std::min(srcWidth, dstWidth);
128 size_t copyHeight = std::min(srcHeight, dstHeight);
129 size_t copyBatches = std::min(srcBatches, dstBatches);
130 size_t copyDepth = std::min(srcDepth, dstDepth);
// Coalesce inner dimensions when there is no padding between rows (and then
// between row-blocks), so fewer, larger copy() calls are issued.
135 if (copyLength == srcWidthStride &&
136 copyLength == dstWidthStride)
140 copyLength *= copyWidth;
// NOTE(review): the 'copyWidth = 1;' reset that presumably follows the line
// above was elided by the listing -- confirm against the full source.
143 if (copyLength == srcHeightStride &&
144 copyLength == dstHeightStride)
148 copyLength *= copyHeight;
// Walk depth -> batch -> height -> width, copying 'copyLength' bytes at
// each innermost step.
153 const unsigned char* srcData = srcDataStart;
154 unsigned char* dstData = dstDataStart;
155 for (
unsigned int d = 0; d < copyDepth; ++d)
157 auto srcPtrDepth = srcData;
158 auto dstPtrDepth = dstData;
159 for (
unsigned int b = 0; b < copyBatches; ++b)
161 auto srcPtrBatch = srcData;
162 auto dstPtrBatch = dstData;
163 for (
unsigned int h = 0; h < copyHeight; ++h)
165 auto srcPtrChannel = srcData;
166 auto dstPtrChannel = dstData;
167 for (
unsigned int w = 0; w < copyWidth; ++w)
// The copy must stay inside both mapped buffers.
169 ARMNN_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
170 ARMNN_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
171 copy(dstData, srcData, copyLength);
172 dstData += dstWidthStride;
173 srcData += srcWidthStride;
// After each loop level: advance by (stride - bytes already consumed) so
// the pointer lands exactly one stride past where it entered the loop,
// skipping any padding the layout carries between rows/batches/slices.
175 dstData += (
static_cast<long>(dstHeightStride) - (dstData - dstPtrChannel));
176 srcData += (
static_cast<long>(srcHeightStride) - (srcData - srcPtrChannel));
178 dstData += (
static_cast<long>(dstBatchStride) - (dstData - dstPtrBatch));
179 srcData += (
static_cast<long>(srcBatchStride) - (srcData - srcPtrBatch));
181 dstData += (
static_cast<long>(dstDepthStride) - (dstData - dstPtrDepth));
182 srcData += (
static_cast<long>(srcDepthStride) - (srcData - srcPtrDepth));
// NOTE(review): trailing srcTensor->Unmap()/dstTensor->Unmap() calls and
// the closing braces were elided by the listing.
189 template <
typename SrcTensorHandleType,
typename DstTensorHandleType,
typename DescriptorType>
191 std::vector<std::pair<SrcTensorHandleType*, DstTensorHandleType*>>& tensorHandlePairs)
193 const unsigned int numInputs =
static_cast<unsigned int>(descriptor.m_Inputs.size());
194 tensorHandlePairs.reserve(numInputs);
196 for (
unsigned int i = 0; i < numInputs; ++i)
198 SrcTensorHandleType*
const srcTensorHandle =
199 PolymorphicDowncast<SrcTensorHandleType*>(descriptor.m_Inputs[i]);
200 DstTensorHandleType*
const dstTensorHandle =
201 PolymorphicDowncast<DstTensorHandleType*>(descriptor.m_Outputs[i]);
203 tensorHandlePairs.emplace_back(srcTensorHandle, dstTensorHandle);
211 void* permuteBuffer);
219 void* permuteBuffer);
armnn::ConstTensor ConvertWeightTensorFromArmnnToAcl(const ConstCpuTensorHandle *weightTensor, DataLayout dataLayout, void *permuteBuffer)
TensorInfo ConvertWeightTensorInfoFromArmnnToAcl(const TensorInfo &weightInfo, DataLayout dataLayout)
virtual TensorShape GetStrides() const =0
Get the strides for each dimension ordered from largest to smallest where the smallest value is the same as the size of a single element in the tensor.
Copyright (c) 2020 ARM Limited.
void IgnoreUnused(Ts &&...)
armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle *tensor, const PermutationVector &permutationVector, void *permuteBuffer)
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
#define ARMNN_ASSERT(COND)
virtual TensorShape GetShape() const =0
Get the number of elements for each dimension ordered from slowest iterating dimension to fastest iterating dimension.
virtual const void * Map(bool blocking=true) const =0
Map the tensor data for access.
virtual void Unmap() const =0
Unmap the tensor data.
int32_t ConvertMaskToACLFormat(int32_t mask, int32_t numDim)
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
void CopyTensorContentsGeneric(const ITensorHandle *srcTensor, ITensorHandle *dstTensor, CopyFunc copy)
void GatherTensorHandlePairs(const DescriptorType &descriptor, std::vector< std::pair< SrcTensorHandleType *, DstTensorHandleType *>> &tensorHandlePairs)
void ReshapeWeightsForAcl(TensorInfo &weightInfo, DataLayout dataLayout)
constexpr unsigned int MaxNumOfTensorDimensions