ArmNN
 20.05
Pooling2dTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Pooling2dTestImpl.hpp"
7 
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10 
11 #include <armnn/LayerSupport.hpp>
12 
15 #include <armnnUtils/Permute.hpp>
16 
18 
20 
23 
24 #include <test/TensorHelpers.hpp>
25 
26 #include <boost/numeric/conversion/cast.hpp>
27 
28 namespace
29 {
30 
31 using namespace armnnUtils;
32 
33 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
34 LayerTestResult<T, 4> SimplePooling2dTestImpl(
35  armnn::IWorkloadFactory& workloadFactory,
37  armnn::Pooling2dDescriptor descriptor,
38  float qScale,
39  int32_t qOffset,
40  const boost::multi_array<T, 4>& input,
41  const boost::multi_array<T, 4>& outputExpected)
42 {
43  IgnoreUnused(memoryManager);
44  const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
45  const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
46  auto heightIndex = dimensionIndices.GetHeightIndex();
47  auto widthIndex = dimensionIndices.GetWidthIndex();
48  auto channelsIndex = dimensionIndices.GetChannelsIndex();
49 
50  unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
51  unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
52  unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
53  unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
54 
55  unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
56  unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
57  unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
58  unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
59 
61  inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
62 
64  outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
65 
66  // Set quantization parameters if the requested type is a quantized type.
67  if(armnn::IsQuantizedType<T>())
68  {
69  inputTensorInfo.SetQuantizationScale(qScale);
70  inputTensorInfo.SetQuantizationOffset(qOffset);
71  outputTensorInfo.SetQuantizationScale(qScale);
72  outputTensorInfo.SetQuantizationOffset(qOffset);
73  }
74 
75  LayerTestResult<T, 4> result(outputTensorInfo);
76 
77  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
78  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
79 
80  armnn::Pooling2dQueueDescriptor queueDescriptor;
81  queueDescriptor.m_Parameters = descriptor;
82  queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
83 
84  armnn::WorkloadInfo workloadInfo;
85  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
86  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
87 
88  // Don't execute if Pooling is not supported, as an exception will be raised.
89  armnn::BackendId backend = workloadFactory.GetBackendId();
90  const size_t reasonIfUnsupportedMaxLen = 255;
91  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
92  result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
93  queueDescriptor.m_Parameters,
94  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
95  if (!result.supported)
96  {
97  return result;
98  }
99 
100  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
101 
102  inputHandle->Allocate();
103  outputHandle->Allocate();
104 
105  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
106 
107  workload->Execute();
108 
109  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
110 
111  result.outputExpected = outputExpected;
112 
113  return result;
114 }
115 
116 //
117 // Tests max pooling with the following parameters:
118 //
119 // Pooling size: 3x3
120 // Stride: (2,4)
121 // input size: 8x13
122 // channels: 2
123 // batch size: 2
124 //
125 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
126 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
127  armnn::IWorkloadFactory& workloadFactory,
129  bool forceNoPadding,
130  float qScale = 1.0f,
131  int32_t qOffset = 0)
132 {
133  armnn::Pooling2dDescriptor descriptor;
135  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
136  descriptor.m_StrideX = 2;
137  descriptor.m_StrideY = 4;
138  // forceNoPadding is mainly used for compatibility with ARM Compute.
139  // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
140  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
141  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
144 
145  unsigned int inputWidth = 8;
146  unsigned int inputHeight = 13;
147  unsigned int outputWidth =
148  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
149  descriptor.m_StrideX;
150  unsigned int outputHeight =
151  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
152  descriptor.m_StrideY;
153  unsigned int channels = 2;
154  unsigned int batchSize = 2;
155 
156  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
157  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
158 
159  // Set quantization parameters if the requested type is a quantized type.
160  if(armnn::IsQuantizedType<T>())
161  {
162  inputTensorInfo.SetQuantizationScale(qScale);
163  inputTensorInfo.SetQuantizationOffset(qOffset);
164  outputTensorInfo.SetQuantizationScale(qScale);
165  outputTensorInfo.SetQuantizationOffset(qOffset);
166  }
167 
168  std::vector<float> singleChannelData({
169  0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
170  1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
171  8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
172  8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
173  5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
174  1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
175  9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
176  1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
177  6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
178  8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
179  7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
180  4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
181  3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
182  });
183 
184  // Constructs input data.
185  std::vector<float> inputData;
186  auto negator = [](float f) { return -f; };
187 
188  // First image (two channels where the second channel is the negative of the first one).
189  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
190  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
191 
192  // Second image (same as first image).
193  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
194  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
195 
196  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
197 
198  // These were calculated manually.
199  auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
200  boost::multi_array<T, 4> outputExpected(shape);
201  if (forceNoPadding)
202  {
203  outputExpected = MakeTensor<T, 4>(outputTensorInfo,
204  QuantizedVector<T>({
205  8.0f, 8.0f, 8.0f,
206  9.0f, 7.0f, 9.0f,
207  9.0f, 9.0f, 9.0f,
208 
209  0.0f, 0.0f, -3.0f,
210  -1.0f, 0.0f, 0.0f,
211  -1.0f, -1.0f, -1.0f,
212 
213  8.0f, 8.0f, 8.0f,
214  9.0f, 7.0f, 9.0f,
215  9.0f, 9.0f, 9.0f,
216 
217  0.0f, 0.0f, -3.0f,
218  -1.0f, 0.0f, 0.0f,
219  -1.0f, -1.0f, -1.0f
220  },
221  qScale, qOffset));
222  }
223  else
224  {
225  outputExpected = MakeTensor<T, 4>(outputTensorInfo,
226  QuantizedVector<T>({
227  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
228  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
229  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
230 
231  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
232  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
233  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
234 
235  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
236  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
237  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
238 
239  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
240  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
241  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
242  },
243  qScale, qOffset));
244  }
245 
246  return SimplePooling2dTestImpl<ArmnnType>(
247  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
248 }
249 
250 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
251 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
252  armnn::IWorkloadFactory& workloadFactory,
254  const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
255  float qScale = 1.0f,
256  int32_t qOffset = 0)
257 {
258  armnn::Pooling2dDescriptor descriptor;
260  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
261  descriptor.m_StrideX = descriptor.m_StrideY = 2;
263  descriptor.m_DataLayout = dataLayout;
264 
265  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
266  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
267 
268  // Set quantization parameters if the requested type is a quantized type.
269  if(armnn::IsQuantizedType<T>())
270  {
271  inputTensorInfo.SetQuantizationScale(qScale);
272  inputTensorInfo.SetQuantizationOffset(qOffset);
273  outputTensorInfo.SetQuantizationScale(qScale);
274  outputTensorInfo.SetQuantizationOffset(qOffset);
275  }
276 
277  std::vector<T> inputData(
278  QuantizedVector<T>({
279  1.0f, 2.0f, 5.0f, 6.0f,
280  3.0f, 4.0f, 7.0f, 8.0f,
281  9.0f, 10.0f, 13.0f, 14.0f,
282  11.0f, 12.0f, 15.0f, 16.0f,
283 
284  17.0f, 18.0f, 21.0f, 22.0f,
285  19.0f, 20.0f, 23.0f, 24.0f,
286  25.0f, 26.0f, 29.0f, 30.0f,
287  27.0f, 28.0f, 31.0f, 32.0f,
288  },
289  qScale, qOffset));
290 
291  std::vector<T> outputData(
292  QuantizedVector<T>({
293  4.0f, 8.0f,
294  12.0f, 16.0f,
295 
296  20.0f, 24.0f,
297  28.0f, 32.0f,
298  },
299  qScale, qOffset));
300 
301  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
302  if (dataLayout == armnn::DataLayout::NHWC)
303  {
304  std::vector<T> tmp(inputData.size());
305  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
306  inputData = tmp;
307 
308  std::vector<T> tmp1(outputData.size());
309  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
310  outputData = tmp1;
311  }
312 
313  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
314 
315  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
316 
317  return SimplePooling2dTestImpl<ArmnnType>(
318  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
319 }
320 
321 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
322 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
323  armnn::IWorkloadFactory& workloadFactory,
326  float qScale = 1.0f,
327  int32_t qOffset = 0)
328 {
329  armnn::Pooling2dDescriptor descriptor;
331  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
332  descriptor.m_StrideX = descriptor.m_StrideY = 2;
334  descriptor.m_DataLayout = dataLayout;
335 
336  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
337  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
338 
339  // Set quantization parameters if the requested type is a quantized type.
340  if(armnn::IsQuantizedType<T>())
341  {
342  inputTensorInfo.SetQuantizationScale(qScale);
343  inputTensorInfo.SetQuantizationOffset(qOffset);
344  outputTensorInfo.SetQuantizationScale(qScale);
345  outputTensorInfo.SetQuantizationOffset(qOffset);
346  }
347 
348  std::vector<T> inputData(
349  QuantizedVector<T>({
350  2.0f, 2.0f, 6.0f, 6.0f,
351  4.0f, 4.0f, 8.0f, 8.0f,
352  10.0f, 12.0f, 14.0f, 16.0f,
353  10.0f, 12.0f, 16.0f, 14.0f,
354 
355  18.0f, 20.0f, 24.0f, 22.0f,
356  20.0f, 18.0f, 22.0f, 24.0f,
357  26.0f, 28.0f, 0.0f, 0.0f,
358  26.0f, 28.0f, 0.0f, 0.0f,
359  },
360  qScale, qOffset));
361 
362  std::vector<T> outputData(
363  QuantizedVector<T>({
364  3.0f, 7.0f,
365  11.0f, 15.0f,
366 
367  19.0f, 23.0f,
368  27.0f, 0.0f,
369  },
370  qScale, qOffset));
371 
372  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
373  if (dataLayout == armnn::DataLayout::NHWC)
374  {
375  std::vector<T> tmp(inputData.size());
376  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
377  inputData = tmp;
378 
379  std::vector<T> tmp1(outputData.size());
380  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
381  outputData = tmp1;
382  }
383 
384  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
385 
386  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
387 
388  return SimplePooling2dTestImpl<ArmnnType>(
389  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
390 }
391 
392 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
393 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
394  armnn::IWorkloadFactory& workloadFactory,
396  float qScale = 1.0f,
397  int32_t qOffset = 0)
398 {
399  armnn::Pooling2dDescriptor descriptor;
401  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
402  descriptor.m_StrideX = descriptor.m_StrideY = 5;
403  descriptor.m_PadLeft = 50;
404  descriptor.m_PadRight = 50;
405  descriptor.m_PadTop = 50;
406  descriptor.m_PadBottom = 50;
408 
409  armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
410  armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
411 
412  // Set quantization parameters if the requested type is a quantized type.
413  if(armnn::IsQuantizedType<T>())
414  {
415  inputTensorInfo.SetQuantizationScale(qScale);
416  inputTensorInfo.SetQuantizationOffset(qOffset);
417  outputTensorInfo.SetQuantizationScale(qScale);
418  outputTensorInfo.SetQuantizationOffset(qOffset);
419  }
420 
421  std::vector<T> inputVec;
422 
423  for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
424  {
425  inputVec.push_back(1);
426  }
427 
428  auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
429 
430  std::vector<T> outputVec;
431 
432  for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
433  {
434  outputVec.push_back(1);
435  }
436 
437  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
438 
439  return SimplePooling2dTestImpl<ArmnnType>(
440  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
441 }
442 
443 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
444 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
445  armnn::IWorkloadFactory& workloadFactory,
448  float qScale = 1.0f,
449  int32_t qOffset = 0)
450 {
451  armnn::Pooling2dDescriptor descriptor;
453  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
454  descriptor.m_StrideX = descriptor.m_StrideY = 2;
456  descriptor.m_DataLayout = dataLayout;
457 
458  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
459  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
460 
461  std::vector<T> inputData(
462  QuantizedVector<T>({
463  1.0f, 7.0f, 5.0f, 5.0f,
464  1.0f, 7.0f, 5.0f, 5.0f,
465  3.0f, 3.0f, 1.0f, 1.0f,
466  3.0f, 3.0f, 1.0f, 1.0f,
467 
468  1.0f, 7.0f, 0.0f, 0.0f,
469  1.0f, 7.0f, 2.0f, 0.0f,
470  0.0f, 2.0f, 1.0f, 1.0f,
471  0.0f, 0.0f, 1.0f, 1.0f,
472  },
473  qScale, qOffset));
474 
475  std::vector<T> outputData(
476  QuantizedVector<T>({
477  5.0f, 5.0f,
478  3.0f, 1.0f,
479 
480  5.0f, 1.0f,
481  1.0f, 1.0f,
482  },
483  qScale, qOffset));
484 
485  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
486  if (dataLayout == armnn::DataLayout::NHWC)
487  {
488  std::vector<T> tmp(inputData.size());
489  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
490  inputData = tmp;
491 
492  std::vector<T> tmp1(outputData.size());
493  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
494  outputData = tmp1;
495  }
496 
497  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
498 
499  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
500 
501  return SimplePooling2dTestImpl<ArmnnType>(
502  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
503 }
504 
505 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
506 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
507  armnn::IWorkloadFactory& workloadFactory,
509  float qScale = 1.0f,
510  int32_t qOffset = 0)
511 {
512  armnn::Pooling2dDescriptor descriptor;
514  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
515  descriptor.m_StrideX = descriptor.m_StrideY = 1;
517 
518  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
519  auto input = MakeTensor<T, 4>(inputTensorInfo,
520  QuantizedVector<T>({
521  2.0f, 1.0f, 5.0f, 2.0f,
522  1.0f, 2.0f, 2.0f, 1.0f,
523  5.0f, 4.0f, 1.0f, 5.0f,
524  2.0f, 1.0f, 5.0f, 2.0f,
525  },
526  qScale, qOffset));
527 
528  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
529  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
530  QuantizedVector<T>({
531  3.0f, 3.0f,
532  3.0f, 3.0f,
533  },
534  qScale, qOffset));
535 
536  return SimplePooling2dTestImpl<ArmnnType>(
537  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
538 }
539 
540 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
541 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
542  armnn::IWorkloadFactory& workloadFactory,
544  float qScale = 1.0f,
545  int32_t qOffset = 0)
546 {
547  armnn::Pooling2dDescriptor descriptor;
549  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
550  descriptor.m_StrideX = descriptor.m_StrideY = 3;
552 
553  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
554  auto input = MakeTensor<T, 4>(inputTensorInfo,
555  QuantizedVector<T>({
556  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
557  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
558  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
559  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
560  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
561  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
562  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
563  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
564  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
565  },
566  qScale, qOffset));
567 
568  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
569  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
570  QuantizedVector<T>({
571  3.0f, 3.0f, 3.0f,
572  3.0f, 3.0f, 3.0f,
573  3.0f, 3.0f, 3.0f,
574  },
575  qScale, qOffset));
576 
577  return SimplePooling2dTestImpl<ArmnnType>(
578  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
579 }
580 
581 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
582 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
583  armnn::IWorkloadFactory& workloadFactory,
585  float qScale = 1.0f,
586  int32_t qOffset = 0)
587 {
588  armnn::Pooling2dDescriptor descriptor;
590  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
591  descriptor.m_StrideX = descriptor.m_StrideY = 4;
593 
594  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
595  auto input = MakeTensor<T, 4>(inputTensorInfo,
596  QuantizedVector<T>({
597  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
598  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
599  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
600  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
601  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
602  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
603  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
604  },
605  qScale, qOffset));
606 
607  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
608  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
609  QuantizedVector<T>({
610  3.0f, 3.0f,
611  3.0f, 3.0f,
612  },
613  qScale, qOffset));
614 
615  return SimplePooling2dTestImpl<ArmnnType>(
616  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
617 }
618 
619 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
620 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
621  armnn::IWorkloadFactory& workloadFactory,
623  float qScale = 1.0f,
624  int32_t qOffset = 0)
625 {
626  armnn::Pooling2dDescriptor descriptor;
628  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
629  descriptor.m_StrideX = descriptor.m_StrideY = 7;
631 
632  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
633  auto input = MakeTensor<T, 4>(inputTensorInfo,
634  QuantizedVector<T>({
635  1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
636  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
637  0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
638  8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
639  0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
640  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
641  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
642  },
643  qScale, qOffset));
644 
645  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
646  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
647  QuantizedVector<T>({
648  3.0f,
649  },
650  qScale, qOffset));
651 
652  return SimplePooling2dTestImpl<ArmnnType>(
653  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
654 }
655 
656 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
657 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
658  armnn::IWorkloadFactory& workloadFactory,
660  float qScale = 1.0f,
661  int32_t qOffset = 0)
662 {
663  armnn::Pooling2dDescriptor descriptor;
665  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
666  descriptor.m_StrideX = descriptor.m_StrideY = 9;
668 
669  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
670  auto input = MakeTensor<T, 4>(inputTensorInfo,
671  QuantizedVector<T>({
672  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
673  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
674  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
675  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
676  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
677  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
678  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
679  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
680  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
681  },
682  qScale, qOffset));
683 
684  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
685  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
686  QuantizedVector<T>({
687  3.0f,
688  },
689  qScale, qOffset));
690 
691  return SimplePooling2dTestImpl<ArmnnType>(
692  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
693 }
694 
695 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
696 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
697  armnn::IWorkloadFactory& workloadFactory,
699  float qScale = 1.0f,
700  int32_t qOffset = 0)
701 {
702  armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
703  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
704 
705  armnn::Pooling2dDescriptor descriptor;
707  descriptor.m_PoolWidth = 2;
708  descriptor.m_PoolHeight = 3;
709  descriptor.m_StrideX = 2;
710  descriptor.m_StrideY = 1;
711  descriptor.m_PadLeft = 2;
712  descriptor.m_PadRight = 0;
713  descriptor.m_PadTop = 1;
714  descriptor.m_PadBottom = 2;
715  descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
716  descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
717 
718  // Construct input data.
719  auto input = MakeTensor<T, 4>(inputTensorInfo,
720  QuantizedVector<T>({
721  1.0f, 3.0f, 4.0f,
722  },
723  qScale, qOffset));
724 
725  // These were calculated manually.
726  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
727  QuantizedVector<T>({
728  0.0f, 3.0f, 0.0f, 3.0f,
729  },
730  qScale, qOffset));
731 
732  return SimplePooling2dTestImpl<ArmnnType>(
733  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
734 }
735 
736 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
737 LayerTestResult<T, 4> ComparePooling2dTestCommon(
738  armnn::IWorkloadFactory& workloadFactory,
740  armnn::IWorkloadFactory& refWorkloadFactory,
741  armnn::PoolingAlgorithm poolingType,
742  float qScale = 1.0f,
743  int32_t qOffset = 0)
744 {
745  IgnoreUnused(memoryManager);
746  const unsigned int inputWidth = 16;
747  const unsigned int inputHeight = 32;
748  const unsigned int channelCount = 2;
749  const unsigned int batchSize = 5;
750 
751  const unsigned int poolSize = 3;
752  const unsigned int strideX = 2;
753  const unsigned int strideY = 4;
754  const unsigned int padX = 0;
755  const unsigned int padY = 0;
756 
757  const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
758  const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
759 
760  armnn::TensorInfo inputTensorInfo;
761  armnn::TensorInfo outputTensorInfo;
762 
763  unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
764  unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
765 
766  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
767  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
768 
769  // Set quantization parameters if the requested type is a quantized type.
770  if(armnn::IsQuantizedType<T>())
771  {
772  inputTensorInfo.SetQuantizationScale(qScale);
773  inputTensorInfo.SetQuantizationOffset(qOffset);
774  outputTensorInfo.SetQuantizationScale(qScale);
775  outputTensorInfo.SetQuantizationOffset(qOffset);
776  }
777 
778  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
779 
780  LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
781 
782  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
783  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
784 
787  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
788  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
789  data.m_Parameters.m_PoolType = poolingType;
790  data.m_Parameters.m_PoolWidth = poolSize;
791  data.m_Parameters.m_PoolHeight = poolSize;
792  data.m_Parameters.m_StrideX = strideX;
793  data.m_Parameters.m_StrideY = strideY;
794  data.m_Parameters.m_PadLeft = padX;
795  data.m_Parameters.m_PadRight = padX;
796  data.m_Parameters.m_PadTop = padY;
797  data.m_Parameters.m_PadBottom = padY;
799 
800  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
801  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
802 
803  // Don't execute if Pooling is not supported, as an exception will be raised.
804  armnn::BackendId backend = workloadFactory.GetBackendId();
805  const size_t reasonIfUnsupportedMaxLen = 255;
806  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
807  comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
808  data.m_Parameters,
809  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
810  if (!comparisonResult.supported)
811  {
812  return comparisonResult;
813  }
814 
815  armnn::Pooling2dQueueDescriptor refData = data;
816  armnn::WorkloadInfo refInfo = info;
817  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
818  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
819 
820  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
821  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
822 
823  outputHandleRef->Allocate();
824  inputHandleRef->Allocate();
825  inputHandle->Allocate();
826  outputHandle->Allocate();
827 
828  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
829  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
830 
831  workload->Execute();
832  workloadRef->Execute();
833 
834  CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
835  CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
836 
837  return comparisonResult;
838 }
839 
840 //
841 // Tests max pooling with the following parameters:
842 //
843 // Pooling size: 2x2
844 // Stride: (2,2)
845 // input size: 4x4
846 // channels: 1
847 // batch size: 1
848 //
849 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
850 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
851  armnn::IWorkloadFactory& workloadFactory,
853  bool forceNoPadding,
854  float qScale = 1.0f,
855  int32_t qOffset = 0)
856 {
857  armnn::Pooling2dDescriptor descriptor;
859  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
860  descriptor.m_StrideX = 2;
861  descriptor.m_StrideY = 2;
862  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
863  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
866 
867 
868  unsigned int inputWidth = 4;
869 
870  unsigned int inputHeight = 4;
871 
872  unsigned int outputWidth =
873  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
874  descriptor.m_StrideX;
875  unsigned int outputHeight =
876  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
877  descriptor.m_StrideY;
878  unsigned int channels = 1;
879  unsigned int batchSize = 1;
880 
881  std::vector<float> inputData = {
882  510.0f, 222.0f, 780.0f, 654.0f,
883  141.0f, 276.0f, 15.0f, 546.0f,
884  303.0f, 618.0f, 582.0f, 339.0f,
885  438.0f, 564.0f, 573.0f, 402.0f
886  };
887 
888  // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
889  std::vector<float> expectedOutputDataWithPadding = {
890  0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
891  0.0f, 438.0f, 618.0f, 402.0f, 0.0f
892  };
893 
894  std::vector<float> expectedOutputDataNoPadding = {
895  510.0f, 780.0f,
896  618.0f, 582.0f
897  };
898 
899  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
900 
901  // Scale and offset should match input - we're just calculating maximum values.
902  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
903 
904  // Set quantization parameters if the requested type is a quantized type.
905  if(armnn::IsQuantizedType<T>())
906  {
907  inputTensorInfo.SetQuantizationScale(qScale);
908  inputTensorInfo.SetQuantizationOffset(qOffset);
909  outputTensorInfo.SetQuantizationScale(qScale);
910  outputTensorInfo.SetQuantizationOffset(qOffset);
911  }
912 
913  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
914 
915  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
916  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
917  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
918 
919  return SimplePooling2dTestImpl<ArmnnType>(
920  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
921 }
922 
923 //
924 // Tests max pooling with the following parameters:
925 //
926 // Pooling size: 3x2
927 // Stride: (2,2)
928 // input size: 3x2
929 // channels: 1
930 // batch size: 1
931 //
932 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
933 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
934  armnn::IWorkloadFactory& workloadFactory,
936  bool forceNoPadding,
937  float qScale = 1.0f,
938  int32_t qOffset = 0)
939 {
940  armnn::Pooling2dDescriptor descriptor;
942  descriptor.m_PoolWidth = 3;
943  descriptor.m_PoolHeight = 2;
944  descriptor.m_StrideX = 2;
945  descriptor.m_StrideY = 2;
946  descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
947  descriptor.m_PadRight = descriptor.m_PadLeft;
948  descriptor.m_PadTop = 0;
949  descriptor.m_PadBottom = 0;
952 
953  unsigned int inputWidth = 3;
954  unsigned int inputHeight = 2;
955  unsigned int outputWidth =
956  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
957  descriptor.m_StrideX;
958  unsigned int outputHeight =
959  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
960  descriptor.m_StrideY;
961  unsigned int channels = 1;
962  unsigned int batchSize = 1;
963 
964  std::vector<float> inputData = {
965  3.0f, 6.0f, 9.0f,
966  12.0f, 15.0f, 18.0f,
967  };
968 
969  std::vector<float> expectedOutputDataWithPadding = {
970  6.0f, 8.0f,
971  };
972 
973  std::vector<float> expectedOutputDataNoPadding = {
974  10.5f,
975  };
976 
977  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
978 
979  // Scale and offset should match input - we're just calculating average values.
980  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
981 
982  // Set quantization parameters if the requested type is a quantized type.
983  if(armnn::IsQuantizedType<T>())
984  {
985  inputTensorInfo.SetQuantizationScale(qScale);
986  inputTensorInfo.SetQuantizationOffset(qOffset);
987  outputTensorInfo.SetQuantizationScale(qScale);
988  outputTensorInfo.SetQuantizationOffset(qOffset);
989  }
990 
991  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
992 
993  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
994  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
995  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
996 
997  return SimplePooling2dTestImpl<ArmnnType>(
998  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
999 }
1000 
1001 
1002 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1003 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1004  armnn::IWorkloadFactory& workloadFactory,
1006  float qScale = 1.0f,
1007  int32_t qOffset = 0)
1008 {
1009  armnn::Pooling2dDescriptor descriptor;
1011  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1012  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1013  descriptor.m_PadLeft = 1;
1014  descriptor.m_PadRight = 1;
1015  descriptor.m_PadTop = 1;
1016  descriptor.m_PadBottom = 1;
1018 
1019  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1020  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1021 
1022  // Set quantization parameters if the requested type is a quantized type.
1023  if(armnn::IsQuantizedType<T>())
1024  {
1025  inputTensorInfo.SetQuantizationScale(qScale);
1026  inputTensorInfo.SetQuantizationOffset(qOffset);
1027  outputTensorInfo.SetQuantizationScale(qScale);
1028  outputTensorInfo.SetQuantizationOffset(qOffset);
1029  }
1030 
1031  auto input = MakeTensor<T, 4>(inputTensorInfo,
1032  QuantizedVector<T>({
1033  -1.0f, -2.0f, 3.0f, 4.0f,
1034  -1.0f, -2.0f, 3.0f, 4.0f,
1035  1.0f, 2.0f, -3.0f, -4.0f,
1036  1.0f, 2.0f, -3.0f, -4.0f,
1037  },
1038  qScale, qOffset));
1039 
1040  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1041  QuantizedVector<T>({
1042  -1.0f, 3.0f, 4.0f,
1043  1.0f, 3.0f, 4.0f,
1044  1.0f, 2.0f, -4.0f,
1045  },
1046  qScale, qOffset));
1047 
1048  return SimplePooling2dTestImpl<ArmnnType>(
1049  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1050 }
1051 
1052 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1053 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1054  armnn::IWorkloadFactory& workloadFactory,
1056  float qScale = 1.0f,
1057  int32_t qOffset = 0)
1058 {
1059  armnn::Pooling2dDescriptor descriptor;
1061  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1062  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1063  descriptor.m_PadLeft = 1;
1064  descriptor.m_PadRight = 1;
1065  descriptor.m_PadTop = 1;
1066  descriptor.m_PadBottom = 1;
1068 
1069  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1070  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1071 
1072  // Set quantization parameters if the requested type is a quantized type.
1073  if(armnn::IsQuantizedType<T>())
1074  {
1075  inputTensorInfo.SetQuantizationScale(qScale);
1076  inputTensorInfo.SetQuantizationOffset(qOffset);
1077  outputTensorInfo.SetQuantizationScale(qScale);
1078  outputTensorInfo.SetQuantizationOffset(qOffset);
1079  }
1080 
1081  auto input = MakeTensor<T, 4>(inputTensorInfo,
1082  QuantizedVector<T>({
1083  -1.0f, -2.0f, 3.0f, 4.0f,
1084  -1.0f, -2.0f, 3.0f, 4.0f,
1085  1.0f, 2.0f, -3.0f, -4.0f,
1086  1.0f, 2.0f, -3.0f, -4.0f,
1087  },
1088  qScale, qOffset));
1089 
1090  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1091  QuantizedVector<T>({
1092  -1.0f, 3.0f, 4.0f, 4.0f,
1093  2.0f, 3.0f, 4.0f, 4.0f,
1094  2.0f, 3.0f, 4.0f, 4.0f,
1095  2.0f, 2.0f, 2.0f, -3.0f,
1096  },
1097  qScale, qOffset));
1098 
1099  return SimplePooling2dTestImpl<ArmnnType>(
1100  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1101 }
1102 
1103 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1104 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1105  armnn::IWorkloadFactory& workloadFactory,
1107  float qScale = 1.0f,
1108  int32_t qOffset = 0)
1109 {
1110  armnn::Pooling2dDescriptor descriptor;
1112  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1113  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1114  descriptor.m_PadLeft = 1;
1115  descriptor.m_PadRight = 1;
1116  descriptor.m_PadTop = 1;
1117  descriptor.m_PadBottom = 1;
1119 
1120  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1121  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1122 
1123  // Set quantization parameters if the requested type is a quantized type.
1124  if(armnn::IsQuantizedType<T>())
1125  {
1126  inputTensorInfo.SetQuantizationScale(qScale);
1127  inputTensorInfo.SetQuantizationOffset(qOffset);
1128  outputTensorInfo.SetQuantizationScale(qScale);
1129  outputTensorInfo.SetQuantizationOffset(qOffset);
1130  }
1131 
1132  auto input = MakeTensor<T, 4>(inputTensorInfo,
1133  QuantizedVector<T>({
1134  12.0f, 20.0f, 32.0f, 40.0f,
1135  12.0f, 20.0f, 32.0f, 40.0f,
1136  12.0f, 20.0f, 32.0f, 40.0f,
1137  12.0f, 20.0f, 32.0f, 40.0f,
1138  },
1139  qScale, qOffset));
1140 
1141  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1142  QuantizedVector<T>({
1143  3.0f, 13.0f, 10.0f,
1144  6.0f, 26.0f, 20.0f,
1145  3.0f, 13.0f, 10.0f,
1146  },
1147  qScale, qOffset));
1148 
1149  return SimplePooling2dTestImpl<ArmnnType>(
1150  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1151 }
1152 
1153 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1154 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1155  armnn::IWorkloadFactory& workloadFactory,
1157  float qScale = 1.0f,
1158  int32_t qOffset = 0)
1159 {
1160  armnn::Pooling2dDescriptor descriptor;
1162  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1163  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1164  descriptor.m_PadLeft = 0;
1165  descriptor.m_PadRight = 0;
1166  descriptor.m_PadTop = 0;
1167  descriptor.m_PadBottom = 0;
1170 
1171  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1172  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1173 
1174  // Set quantization parameters if the requested type is a quantized type.
1175  if(armnn::IsQuantizedType<T>())
1176  {
1177  inputTensorInfo.SetQuantizationScale(qScale);
1178  inputTensorInfo.SetQuantizationOffset(qOffset);
1179  outputTensorInfo.SetQuantizationScale(qScale);
1180  outputTensorInfo.SetQuantizationOffset(qOffset);
1181  }
1182 
1183  auto input = MakeTensor<T, 4>(inputTensorInfo,
1184  QuantizedVector<T>({
1185  1.0f, 2.0f, 3.0f, 4.0f,
1186  1.0f, 2.0f, 3.0f, 4.0f,
1187  1.0f, 2.0f, 3.0f, 4.0f,
1188  1.0f, 2.0f, 3.0f, 4.0f,
1189  },
1190  qScale, qOffset));
1191 
1192  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1193  QuantizedVector<T>({
1194  2.0f, 3.5f,
1195  2.0f, 3.5f
1196  },
1197  qScale, qOffset));
1198 
1199  return SimplePooling2dTestImpl<ArmnnType>(
1200  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1201 }
1202 
1203 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1204 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1205  armnn::IWorkloadFactory& workloadFactory,
1207  float qScale = 1.0f,
1208  int32_t qOffset = 0)
1209 {
1210  armnn::Pooling2dDescriptor descriptor;
1212  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1213  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1214  descriptor.m_PadLeft = 1;
1215  descriptor.m_PadRight = 1;
1216  descriptor.m_PadTop = 1;
1217  descriptor.m_PadBottom = 1;
1219 
1220  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1221  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1222 
1223  // Set quantization parameters if the requested type is a quantized type.
1224  if(armnn::IsQuantizedType<T>())
1225  {
1226  inputTensorInfo.SetQuantizationScale(qScale);
1227  inputTensorInfo.SetQuantizationOffset(qOffset);
1228  outputTensorInfo.SetQuantizationScale(qScale);
1229  outputTensorInfo.SetQuantizationOffset(qOffset);
1230  }
1231 
1232  auto input = MakeTensor<T, 4>(inputTensorInfo,
1233  QuantizedVector<T>({
1234  9.0f, 27.0f, 18.0f, 36.0f,
1235  18.0f, 9.0f, 18.0f, 9.0f,
1236  27.0f, 18.0f, 9.0f, 27.0f,
1237  9.0f, 27.0f, 9.0f, 18.0f,
1238  },
1239  qScale, qOffset));
1240 
1241  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1242  QuantizedVector<T>({
1243  7.0f, 11.0f, 13.0f, 9.0f,
1244  12.0f, 17.0f, 19.0f, 13.0f,
1245  12.0f, 16.0f, 16.0f, 10.0f,
1246  9.0f, 11.0f, 12.0f, 7.0f,
1247  },
1248  qScale, qOffset));
1249 
1250  return SimplePooling2dTestImpl<ArmnnType>(
1251  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1252 }
1253 
1254 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1255 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1256  armnn::IWorkloadFactory& workloadFactory,
1258  float qScale = 1.0f,
1259  int32_t qOffset = 0)
1260 {
1261  armnn::Pooling2dDescriptor descriptor;
1263  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1264  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1265  descriptor.m_PadLeft = 1;
1266  descriptor.m_PadRight = 1;
1267  descriptor.m_PadTop = 1;
1268  descriptor.m_PadBottom = 1;
1270 
1271  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1272  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1273 
1274  // Set quantization parameters if the requested type is a quantized type.
1275  if(armnn::IsQuantizedType<T>())
1276  {
1277  inputTensorInfo.SetQuantizationScale(qScale);
1278  inputTensorInfo.SetQuantizationOffset(qOffset);
1279  outputTensorInfo.SetQuantizationScale(qScale);
1280  outputTensorInfo.SetQuantizationOffset(qOffset);
1281  }
1282 
1283  auto input = MakeTensor<T, 4>(inputTensorInfo,
1284  QuantizedVector<T>({
1285  2.0f, 4.0f, 8.0f, 16.0f,
1286  4.0f, 2.0f, 2.0f, 4.0f,
1287  8.0f, 2.0f, 4.0f, 2.0f,
1288  16.0f, 2.0f, 2.0f, 8.0f,
1289  },
1290  qScale, qOffset));
1291 
1292  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1293  QuantizedVector<T>({
1294  1.0f, 4.4721f, 8.0f,
1295  4.4721f, 2.6457f, 2.236f,
1296  8.0f, 1.4142f, 4.0f,
1297  },
1298  qScale, qOffset));
1299 
1300  return SimplePooling2dTestImpl<ArmnnType>(
1301  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1302 }
1303 
1304 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1305 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1306  armnn::IWorkloadFactory& workloadFactory,
1308  float qScale = 1.0f,
1309  int32_t qOffset = 0)
1310 {
1311  armnn::Pooling2dDescriptor descriptor;
1313  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1314  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1315  descriptor.m_PadLeft = 1;
1316  descriptor.m_PadRight = 1;
1317  descriptor.m_PadTop = 1;
1318  descriptor.m_PadBottom = 1;
1320 
1321  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1322  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1323 
1324  // Set quantization parameters if the requested type is a quantized type.
1325  if(armnn::IsQuantizedType<T>())
1326  {
1327  inputTensorInfo.SetQuantizationScale(qScale);
1328  inputTensorInfo.SetQuantizationOffset(qOffset);
1329  outputTensorInfo.SetQuantizationScale(qScale);
1330  outputTensorInfo.SetQuantizationOffset(qOffset);
1331  }
1332 
1333  auto input = MakeTensor<T, 4>(inputTensorInfo,
1334  QuantizedVector<T>({
1335  1.0f, 2.0f, 3.0f, 4.0f,
1336  1.0f, 2.0f, 3.0f, 4.0f,
1337  1.0f, 2.0f, 3.0f, 4.0f,
1338  1.0f, 2.0f, 3.0f, 4.0f,
1339  },
1340  qScale, qOffset));
1341 
1342  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1343  QuantizedVector<T>({
1344  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1345  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1346  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1347  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1348  },
1349  qScale, qOffset));
1350 
1351  return SimplePooling2dTestImpl<ArmnnType>(
1352  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1353 }
1354 
1355 } // anonymous namespace
1356 
1358  armnn::IWorkloadFactory& workloadFactory,
1360  bool forceNoPadding)
1361 {
1362  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1363  workloadFactory, memoryManager, forceNoPadding);
1364 }
1365 
1367  armnn::IWorkloadFactory& workloadFactory,
1369  bool forceNoPadding)
1370 {
1371  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1372  workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1373 }
1374 
1376  armnn::IWorkloadFactory& workloadFactory,
1378  bool forceNoPadding)
1379 {
1380  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1381  workloadFactory, memoryManager, forceNoPadding);
1382 }
1383 
1385  armnn::IWorkloadFactory& workloadFactory,
1387  bool forceNoPadding)
1388 {
1389  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1390  workloadFactory, memoryManager, forceNoPadding);
1391 }
1392 
1394  armnn::IWorkloadFactory& workloadFactory,
1396  bool forceNoPadding)
1397 {
1398  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1399  workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1400 }
1401 
1403  armnn::IWorkloadFactory& workloadFactory,
1405  bool forceNoPadding)
1406 {
1407  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1408  workloadFactory, memoryManager, forceNoPadding);
1409 }
1410 
1412  armnn::IWorkloadFactory& workloadFactory,
1414  const armnn::DataLayout dataLayout)
1415 {
1416  return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1417 }
1418 
1420  armnn::IWorkloadFactory& workloadFactory,
1422  const armnn::DataLayout dataLayout)
1423 {
1424  return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1425 }
1426 
1428  armnn::IWorkloadFactory& workloadFactory,
1430  const armnn::DataLayout dataLayout)
1431 {
1432  return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1433 }
1435  armnn::IWorkloadFactory& workloadFactory,
1437 {
1438  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1439 }
1440 
1442  armnn::IWorkloadFactory& workloadFactory,
1444 {
1445  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1446  workloadFactory, memoryManager, 1.0f, -5);
1447 }
1448 
1450  armnn::IWorkloadFactory& workloadFactory,
1452 {
1453  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1454  workloadFactory, memoryManager);
1455 }
1456 
1458  armnn::IWorkloadFactory& workloadFactory,
1460 {
1461  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1462 }
1463 
1465  armnn::IWorkloadFactory& workloadFactory,
1467 {
1468  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1469  workloadFactory, memoryManager, 1.0f, -5);
1470 }
1471 
1473  armnn::IWorkloadFactory& workloadFactory,
1475 {
1476  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1477  workloadFactory, memoryManager);
1478 }
1479 
1481  armnn::IWorkloadFactory& workloadFactory,
1483  const armnn::DataLayout dataLayout)
1484 {
1485  return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1486 }
1487 
1489  armnn::IWorkloadFactory& workloadFactory,
1491  const armnn::DataLayout dataLayout)
1492 {
1493  return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1494  workloadFactory, memoryManager, dataLayout, 0.5, -1);
1495 }
1496 
1498  armnn::IWorkloadFactory& workloadFactory,
1500  const armnn::DataLayout dataLayout)
1501 {
1502  return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1503  workloadFactory, memoryManager, dataLayout);
1504 }
1505 
1507  armnn::IWorkloadFactory& workloadFactory,
1509  bool forceNoPadding)
1510 {
1511  return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1512  workloadFactory, memoryManager, forceNoPadding);
1513 }
1514 
1516  armnn::IWorkloadFactory& workloadFactory,
1518 {
1519  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1520 }
1521 
1523  armnn::IWorkloadFactory& workloadFactory,
1525 {
1526  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1527  workloadFactory, memoryManager, 0.5, -1);
1528 }
1529 
1531  armnn::IWorkloadFactory& workloadFactory,
1533 {
1534  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1535  workloadFactory, memoryManager);
1536 }
1538  armnn::IWorkloadFactory& workloadFactory,
1540 {
1541  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1542 }
1543 
1545  armnn::IWorkloadFactory& workloadFactory,
1547 {
1548  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1549  workloadFactory, memoryManager);
1550 }
1551 
1553  armnn::IWorkloadFactory& workloadFactory,
1555 {
1556  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1557  workloadFactory, memoryManager);
1558 }
1559 
1561  armnn::IWorkloadFactory& workloadFactory,
1563 {
1564  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1565  workloadFactory, memoryManager);
1566 }
1567 
1569  armnn::IWorkloadFactory& workloadFactory,
1571 {
1572  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1573  workloadFactory, memoryManager);
1574 }
1575 
1577  armnn::IWorkloadFactory& workloadFactory,
1579 {
1580  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1581  workloadFactory, memoryManager);
1582 }
1583 
1585  armnn::IWorkloadFactory& workloadFactory,
1587 {
1588  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1589 }
1590 
1592  armnn::IWorkloadFactory& workloadFactory,
1594 {
1595  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1596  workloadFactory, memoryManager);
1597 }
1598 
1600  armnn::IWorkloadFactory& workloadFactory,
1602 {
1603  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1604  workloadFactory, memoryManager);
1605 }
1606 
1608  armnn::IWorkloadFactory& workloadFactory,
1610  const armnn::DataLayout dataLayout)
1611 {
1612  return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1613 }
1614 
1616  armnn::IWorkloadFactory& workloadFactory,
1618  const armnn::DataLayout dataLayout)
1619 {
1620  return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1621 }
1622 
1624  armnn::IWorkloadFactory& workloadFactory,
1626  const armnn::DataLayout dataLayout)
1627 {
1628  return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1629 }
1630 
1632  armnn::IWorkloadFactory& workloadFactory,
1634 {
1635  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1636 }
1637 
1639  armnn::IWorkloadFactory& workloadFactory,
1641 {
1642  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1643 }
1644 
1646  armnn::IWorkloadFactory& workloadFactory,
1648 {
1649  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1650 }
1651 
1653  armnn::IWorkloadFactory& workloadFactory,
1655 {
1656  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1657 }
1658 
1660  armnn::IWorkloadFactory& workloadFactory,
1662 {
1663  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1664 }
1665 
1667  armnn::IWorkloadFactory& workloadFactory,
1669 {
1670  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1671 }
1673  armnn::IWorkloadFactory& workloadFactory,
1675 {
1676  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1677 }
1678 
1680  armnn::IWorkloadFactory& workloadFactory,
1682 {
1683  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1684 }
1685 
1687  armnn::IWorkloadFactory& workloadFactory,
1689 {
1690  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1691 }
1692 
1694  armnn::IWorkloadFactory& workloadFactory,
1696 {
1697  return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1698 }
1699 
1701  armnn::IWorkloadFactory& workloadFactory,
1703 {
1704  return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1705 }
1706 
1708  armnn::IWorkloadFactory& workloadFactory,
1710 {
1711  return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1712 }
1713 
1715  armnn::IWorkloadFactory& workloadFactory,
1717 {
1718  return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1719 }
1720 
1722  armnn::IWorkloadFactory& workloadFactory,
1724 {
1725  return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1726 }
1727 
1729  armnn::IWorkloadFactory& workloadFactory,
1731 {
1732  return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1733 }
1735  armnn::IWorkloadFactory& workloadFactory,
1737 {
1738  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1739 }
1740 
1742  armnn::IWorkloadFactory& workloadFactory,
1744 {
1745  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1746 }
1747 
1749  armnn::IWorkloadFactory& workloadFactory,
1751 {
1752  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1753 }
1754 
1756  armnn::IWorkloadFactory& workloadFactory,
1758 {
1759  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1760 }
1761 
1763  armnn::IWorkloadFactory& workloadFactory,
1765 {
1766  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1767 }
1768 
1770  armnn::IWorkloadFactory& workloadFactory,
1772 {
1773  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1774 }
1775 
1777  armnn::IWorkloadFactory& workloadFactory,
1779 {
1780  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1781 }
1782 
1784  armnn::IWorkloadFactory& workloadFactory,
1786 {
1787  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1788 }
1789 
1791  armnn::IWorkloadFactory& workloadFactory,
1793 {
1794  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1795 }
1796 
1798  armnn::IWorkloadFactory& workloadFactory,
1800  armnn::IWorkloadFactory& refWorkloadFactory,
1801  armnn::PoolingAlgorithm poolingType)
1802 {
1803  return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1804  workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1805 }
1806 
1808  armnn::IWorkloadFactory& workloadFactory,
1810  armnn::IWorkloadFactory& refWorkloadFactory,
1811  armnn::PoolingAlgorithm poolingType)
1812 {
1813  return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1814  workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1815 }
1816 
1818  armnn::IWorkloadFactory& workloadFactory,
1820  armnn::IWorkloadFactory& refWorkloadFactory,
1821  armnn::PoolingAlgorithm poolingType)
1822 {
1823  return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1824  workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1825 }
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetNumElements() const
Definition: Tensor.cpp:107
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout
Definition: Types.hpp:49
unsigned int GetWidthIndex() const
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorShape & GetShape() const
Definition: Tensor.hpp:88
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
The padding fields don&#39;t count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
PoolingAlgorithm
Definition: Types.hpp:96
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetHeightIndex() const
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:121
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadRight
Padding right value in the width dimension.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:260
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
The padding fields count, but are ignored.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:276
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetChannelsIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)