#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/Size2D.h>
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/Types.h>

#include <boost/cast.hpp>

#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

namespace armcomputetensorutils
{
31 unsigned int originalInputRank,
32 const std::vector<unsigned int>& armnnAxes);
35 arm_compute::TensorShape BuildArmComputeTensorShape(
const armnn::TensorShape& tensorShape);
39 arm_compute::TensorInfo BuildArmComputeTensorInfo(
const armnn::TensorInfo& tensorInfo);
44 arm_compute::TensorInfo BuildArmComputeTensorInfo(
const armnn::TensorInfo& tensorInfo,
54 arm_compute::PoolingLayerInfo BuildArmComputePoolingLayerInfo(
const Pooling2dDescriptor& descriptor,
55 bool fpMixedPrecision =
false);
58 arm_compute::NormalizationLayerInfo BuildArmComputeNormalizationLayerInfo(
const NormalizationDescriptor& desc);
67 arm_compute::Size2D BuildArmComputeSize2D(
const unsigned int width,
const unsigned int height);
70 arm_compute::PixelValue GetPixelValue(arm_compute::ITensor& input,
float pixelValue);
73 template <
typename Descriptor>
74 arm_compute::PadStrideInfo BuildArmComputePadStrideInfo(
const Descriptor &descriptor)
76 return arm_compute::PadStrideInfo(descriptor.m_StrideX,
79 descriptor.m_PadRight,
81 descriptor.m_PadBottom,
82 arm_compute::DimensionRoundingType::FLOOR);
86 template <
typename Tensor>
89 tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo));
93 template <
typename Tensor>
96 tensor.allocator()->init(BuildArmComputeTensorInfo(tensorInfo, dataLayout));
/// Allocates the tensor's backing memory via its allocator (contents are not filled in here).
template <typename Tensor>
void InitialiseArmComputeTensorEmpty(Tensor& tensor)
{
    tensor.allocator()->allocate();
}
/// Releases the tensor if ACL reports it as no longer used (is_used() == false).
/// Safe to call with an empty unique_ptr.
template <typename Tensor>
void FreeTensorIfUnused(std::unique_ptr<Tensor>& tensor)
{
    if (tensor && !tensor->is_used())
    {
        tensor.reset(nullptr);
    }
}
116 inline size_t GetTensorOffset(
const arm_compute::ITensorInfo&
info,
119 uint32_t channelIndex,
124 coords.set(4, static_cast<int>(depthIndex));
125 coords.set(3, static_cast<int>(batchIndex));
126 coords.set(2, static_cast<int>(channelIndex));
127 coords.set(1, static_cast<int>(y));
128 coords.set(0, static_cast<int>(x));
133 inline size_t GetLinearBufferOffset(
const arm_compute::ITensorInfo& info,
136 uint32_t channelIndex,
140 const arm_compute::TensorShape& shape = info.tensor_shape();
141 uint32_t width =
static_cast<uint32_t
>(shape[0]);
142 uint32_t height =
static_cast<uint32_t
>(shape[1]);
143 uint32_t numChannels =
static_cast<uint32_t
>(shape[2]);
144 uint32_t numBatches =
static_cast<uint32_t
>(shape[3]);
145 return (((depthIndex * numBatches + batchIndex) * numChannels + channelIndex) * height + y) * width + x;
148 template <
typename T>
149 void CopyArmComputeITensorData(
const arm_compute::ITensor& srcTensor, T* dstData)
154 const arm_compute::ITensorInfo& info = *srcTensor.info();
155 const arm_compute::TensorShape& shape = info.tensor_shape();
156 const uint8_t*
const bufferPtr = srcTensor.buffer();
157 uint32_t width =
static_cast<uint32_t
>(shape[0]);
158 uint32_t height =
static_cast<uint32_t
>(shape[1]);
159 uint32_t numChannels =
static_cast<uint32_t
>(shape[2]);
160 uint32_t numBatches =
static_cast<uint32_t
>(shape[3]);
161 uint32_t depth =
static_cast<uint32_t
>(shape[4]);
163 for (
unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
165 for (
unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
167 for (
unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
169 for (
unsigned int y = 0; y < height; ++y)
174 dstData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
175 bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
184 template <
typename T>
185 void CopyArmComputeITensorData(
const T* srcData, arm_compute::ITensor& dstTensor)
190 const arm_compute::ITensorInfo& info = *dstTensor.info();
191 const arm_compute::TensorShape& shape = info.tensor_shape();
192 uint8_t*
const bufferPtr = dstTensor.buffer();
193 uint32_t width =
static_cast<uint32_t
>(shape[0]);
194 uint32_t height =
static_cast<uint32_t
>(shape[1]);
195 uint32_t numChannels =
static_cast<uint32_t
>(shape[2]);
196 uint32_t numBatches =
static_cast<uint32_t
>(shape[3]);
197 uint32_t depth =
static_cast<uint32_t
>(shape[4]);
199 for (
unsigned int depthIndex = 0; depthIndex < depth; ++depthIndex)
201 for (
unsigned int batchIndex = 0; batchIndex < numBatches; ++batchIndex)
203 for (
unsigned int channelIndex = 0; channelIndex < numChannels; ++channelIndex)
205 for (
unsigned int y = 0; y < height; ++y)
210 bufferPtr + GetTensorOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
211 srcData + GetLinearBufferOffset(info, depthIndex, batchIndex, channelIndex, y, 0),
226 template<
typename ArmComputeType,
typename T>
227 TensorShape
GetTensorShape(
const ArmComputeType& shapelike, T initial)
230 for (
unsigned int i=0; i < shapelike.num_dimensions(); ++i)
232 s[(shapelike.num_dimensions()-1)-i] = boost::numeric_cast<unsigned int>(shapelike[i]);
234 return TensorShape(boost::numeric_cast<unsigned int>(shapelike.num_dimensions()), s.data());
238 inline TensorShape GetStrides(
const arm_compute::Strides& strides)
244 inline TensorShape GetShape(
const arm_compute::TensorShape& shape)
} // namespace armcomputetensorutils

// Copyright (c) 2020 ARM Limited.
//
// NOTE(review): the lines below are residue from a documentation-index extraction, not part
// of this header's code; preserved verbatim for reference:
//   std::array< unsigned int, MaxNumOfTensorDimensions > Coordinates
//   armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels,
//                                     unsigned int height, unsigned int width,
//                                     const armnn::DataLayout dataLayout)
//   std::enable_if_t< std::is_unsigned< Source >::value && std::is_unsigned< Dest >::value, Dest >
//       numeric_cast(Source source)
//   constexpr unsigned int MaxNumOfTensorDimensions