12 #include <arm_compute/core/ITensor.h> 13 #include <arm_compute/core/TensorInfo.h> 14 #include <arm_compute/core/Types.h> 22 namespace armcomputetensorutils
30 unsigned int originalInputRank,
31 const std::vector<unsigned int>& armnnAxes);
34 arm_compute::TensorShape BuildArmComputeTensorShape(
const armnn::TensorShape& tensorShape);
38 arm_compute::TensorInfo BuildArmComputeTensorInfo(
const armnn::TensorInfo& tensorInfo);
43 arm_compute::TensorInfo BuildArmComputeTensorInfo(
const armnn::TensorInfo& tensorInfo,
53 arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(
const Pooling2dDescriptor& descriptor,
54 bool fpMixedPrecision =
false);
57 arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(
const NormalizationDescriptor& desc);
66 arm_compute::Size2D BuildArmComputeSize2D(
const unsigned int width,
const unsigned int height);
69 arm_compute::PixelValue GetPixelValue(
const arm_compute::ITensorInfo* tensorInfo,
float pixelValue);
72 template <
typename Descriptor>
73 arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(
const Descriptor &descriptor)
75 return arm_compute::PadStrideInfo(descriptor.m_StrideX,
78 descriptor.m_PadRight,
80 descriptor.m_PadBottom,
81 arm_compute::DimensionRoundingType::FLOOR);
85 template <
typename Tensor>
88 tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
92 template <
typename Tensor>
95 tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
/// Allocates backing memory for the given ArmCompute tensor; the tensor's info/shape
/// must already have been initialised (e.g. via BuildArmComputeTensor).
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}
/// Utility function to free unused tensors after a workload is configured and prepared.
/// Releases the tensor only when ACL reports it is no longer referenced (is_used() == false);
/// a null pointer is a no-op.
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (tensor && !tensor->is_used())
    {
        tensor.reset(nullptr);
    }
}
115 inline size_t GetTensorOffset(
const arm_compute::ITensorInfo&
info,
118 uint32_t channelIndex,
123 coords.set(4, static_cast<int>(depthIndex));
124 coords.set(3, static_cast<int>(batchIndex));
125 coords.set(2, static_cast<int>(channelIndex));
126 coords.set(1, static_cast<int>(y));
127 coords.set(0, static_cast<int>(x));
132 inline size_t GetLinearBufferOffset(
const arm_compute::ITensorInfo& info,
135 uint32_t channelIndex,
139 const arm_compute::TensorShape& shape = info.tensor_shape();
140 uint32_t width =
static_cast<uint32_t
>(shape[0]);
141 uint32_t height =
static_cast<uint32_t
>(shape[1]);
142 uint32_t numChannels =
static_cast<uint32_t
>(shape[2]);
143 uint32_t numBatches =
static_cast<uint32_t
>(shape[3]);
144 return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
147 template <
typename T>
148 void CopyArmComputeITensorData(
const arm_compute::ITensor& srcTensor, T* dstData)
153 const arm_compute::ITensorInfo& info = *srcTensor.info();
154 const arm_compute::TensorShape& shape = info.tensor_shape();
155 const uint8_t*
const bufferPtr = srcTensor.buffer();
156 uint32_t width =
static_cast<uint32_t
>(shape[0]);
157 uint32_t height =
static_cast<uint32_t
>(shape[1]);
158 uint32_t numChannels =
static_cast<uint32_t
>(shape[2]);
159 uint32_t numBatches =
static_cast<uint32_t
>(shape[3]);
160 uint32_t depth =
static_cast<uint32_t
>(shape[4]);
162 for (
unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
164 for (
unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
166 for (
unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
168 for (
unsigned int y = 0; y < height; ++y)
173 dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
174 bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
183 template <
typename T>
184 void CopyArmComputeITensorData(
const T* srcData, arm_compute::ITensor& dstTensor)
189 const arm_compute::ITensorInfo& info = *dstTensor.info();
190 const arm_compute::TensorShape& shape = info.tensor_shape();
191 uint8_t*
const bufferPtr = dstTensor.buffer();
192 uint32_t width =
static_cast<uint32_t
>(shape[0]);
193 uint32_t height =
static_cast<uint32_t
>(shape[1]);
194 uint32_t numChannels =
static_cast<uint32_t
>(shape[2]);
195 uint32_t numBatches =
static_cast<uint32_t
>(shape[3]);
196 uint32_t depth =
static_cast<uint32_t
>(shape[4]);
198 for (
unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
200 for (
unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
202 for (
unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
204 for (
unsigned int y = 0; y < height; ++y)
209 bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
210 srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
225 template<
typename ArmComputeType,
typename T>
226 TensorShape
GetTensorShape(
const ArmComputeType& shapelike, T initial)
229 for (
unsigned int i=0; i < shapelike.num_dimensions(); ++i)
231 s[(shapelike.num_dimensions()-1)-i] = armnn::numeric_cast<unsigned int>(shapelike[i]);
233 return TensorShape(armnn::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
237 inline TensorShape GetStrides(
const arm_compute::Strides& strides)
243 inline TensorShape GetShape(
const arm_compute::TensorShape& shape)
std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
Copyright (c) 2021 ARM Limited and Contributors.
const armnnSerializer::Pooling2dDescriptor * Pooling2dDescriptor
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
constexpr unsigned int MaxNumOfTensorDimensions