ArmNN
 21.11
Pooling2dTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Pooling2dTestImpl.hpp"
7 
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10 
11 #include <armnn/LayerSupport.hpp>
12 
15 #include <armnnUtils/Permute.hpp>
16 
19 
21 
24 
25 #include <test/TensorHelpers.hpp>
26 
27 namespace
28 {
29 
30 using namespace armnnUtils;
31 
32 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
33 LayerTestResult<T, 4> SimplePooling2dTestImpl(
34  armnn::IWorkloadFactory& workloadFactory,
36  const armnn::ITensorHandleFactory& tensorHandleFactory,
37  armnn::Pooling2dDescriptor descriptor,
38  float qScale,
39  int32_t qOffset,
40  const std::vector<T>& input,
41  const std::vector<T>& outputExpected,
42  const armnn::TensorShape& inputShape,
43  const armnn::TensorShape& outputShape)
44 {
45  IgnoreUnused(memoryManager);
46  const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
47  const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
48  auto heightIndex = dimensionIndices.GetHeightIndex();
49  auto widthIndex = dimensionIndices.GetWidthIndex();
50  auto channelsIndex = dimensionIndices.GetChannelsIndex();
51 
52  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[heightIndex]);
53  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[widthIndex]);
54  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[channelsIndex]);
55  unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
56 
57  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputShape[heightIndex]);
58  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputShape[widthIndex]);
59  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputShape[channelsIndex]);
60  unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputShape[0]);
61 
63  inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
64 
66  outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
67 
68  // Set quantization parameters if the requested type is a quantized type.
69  if(armnn::IsQuantizedType<T>())
70  {
71  inputTensorInfo.SetQuantizationScale(qScale);
72  inputTensorInfo.SetQuantizationOffset(qOffset);
73  outputTensorInfo.SetQuantizationScale(qScale);
74  outputTensorInfo.SetQuantizationOffset(qOffset);
75  }
76 
77  LayerTestResult<T, 4> result(outputTensorInfo);
78  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
79 
80  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
81  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
82 
83  armnn::Pooling2dQueueDescriptor queueDescriptor;
84  queueDescriptor.m_Parameters = descriptor;
85  queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
86 
87  armnn::WorkloadInfo workloadInfo;
88  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
89  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
90 
91  // Don't execute if Pooling is not supported, as an exception will be raised.
92  armnn::BackendId backend = workloadFactory.GetBackendId();
93  const size_t reasonIfUnsupportedMaxLen = 255;
94  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
95  result.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
96  queueDescriptor.m_Parameters,
97  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
98  if (!result.m_Supported)
99  {
100  return result;
101  }
102 
103  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
104 
105  inputHandle->Allocate();
106  outputHandle->Allocate();
107 
108  CopyDataToITensorHandle(inputHandle.get(), input.data());
109 
110  workload->Execute();
111 
112  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
113 
114  result.m_ActualData = actualOutput;
115  result.m_ExpectedData = outputExpected;
116 
117  return result;
118 }
119 
120 //
121 // Tests max pooling with the following parameters:
122 //
123 // Pooling size: 3x3
124 // Stride: (2,4)
125 // input size: 8x13
126 // channels: 2
127 // batch size: 2
128 //
129 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
130 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
131  armnn::IWorkloadFactory& workloadFactory,
133  const armnn::ITensorHandleFactory& tensorHandleFactory,
134  bool forceNoPadding,
135  float qScale = 1.0f,
136  int32_t qOffset = 0)
137 {
138  armnn::Pooling2dDescriptor descriptor;
140  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
141  descriptor.m_StrideX = 2;
142  descriptor.m_StrideY = 4;
143  // forceNoPadding is mainly used for compatibility with ARM Compute.
144  // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
145  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
146  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
149 
150  unsigned int inputWidth = 8;
151  unsigned int inputHeight = 13;
152  unsigned int outputWidth =
153  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
154  descriptor.m_StrideX;
155  unsigned int outputHeight =
156  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
157  descriptor.m_StrideY;
158  unsigned int channels = 2;
159  unsigned int batchSize = 2;
160 
161  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
162  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
163 
164  // Set quantization parameters if the requested type is a quantized type.
165  if(armnn::IsQuantizedType<T>())
166  {
167  inputTensorInfo.SetQuantizationScale(qScale);
168  inputTensorInfo.SetQuantizationOffset(qOffset);
169  outputTensorInfo.SetQuantizationScale(qScale);
170  outputTensorInfo.SetQuantizationOffset(qOffset);
171  }
172 
173  std::vector<float> singleChannelData({
174  0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
175  1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
176  8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
177  8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
178  5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
179  1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
180  9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
181  1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
182  6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
183  8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
184  7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
185  4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
186  3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
187  });
188 
189  // Constructs input data.
190  std::vector<float> inputData;
191  auto negator = [](float f) { return -f; };
192 
193  // First image (two channels where the second channel is the negative of the first one).
194  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
195  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
196 
197  // Second image (same as first image).
198  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
199  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
200 
201  auto input = QuantizedVector<T>(inputData, qScale, qOffset);
202 
203  // These were calculated manually.
204  std::vector<T> outputExpected;
205  if (forceNoPadding)
206  {
207  outputExpected = QuantizedVector<T>(
208  {
209  8.0f, 8.0f, 8.0f,
210  9.0f, 7.0f, 9.0f,
211  9.0f, 9.0f, 9.0f,
212 
213  0.0f, 0.0f, -3.0f,
214  -1.0f, 0.0f, 0.0f,
215  -1.0f, -1.0f, -1.0f,
216 
217  8.0f, 8.0f, 8.0f,
218  9.0f, 7.0f, 9.0f,
219  9.0f, 9.0f, 9.0f,
220 
221  0.0f, 0.0f, -3.0f,
222  -1.0f, 0.0f, 0.0f,
223  -1.0f, -1.0f, -1.0f
224  },
225  qScale, qOffset);
226  }
227  else
228  {
229  outputExpected = QuantizedVector<T>(
230  {
231  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
232  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
233  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
234 
235  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
236  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
237  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
238 
239  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
240  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
241  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
242 
243  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
244  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
245  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
246  },
247  qScale, qOffset);
248  }
249 
250  return SimplePooling2dTestImpl<ArmnnType>(
251  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
252  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
253 }
254 
255 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
256 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
257  armnn::IWorkloadFactory& workloadFactory,
259  const armnn::ITensorHandleFactory& tensorHandleFactory,
260  const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
261  float qScale = 1.0f,
262  int32_t qOffset = 0)
263 {
264  armnn::Pooling2dDescriptor descriptor;
266  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
267  descriptor.m_StrideX = descriptor.m_StrideY = 2;
269  descriptor.m_DataLayout = dataLayout;
270 
271  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
272  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
273 
274  // Set quantization parameters if the requested type is a quantized type.
275  if(armnn::IsQuantizedType<T>())
276  {
277  inputTensorInfo.SetQuantizationScale(qScale);
278  inputTensorInfo.SetQuantizationOffset(qOffset);
279  outputTensorInfo.SetQuantizationScale(qScale);
280  outputTensorInfo.SetQuantizationOffset(qOffset);
281  }
282 
283  std::vector<T> inputData(
284  QuantizedVector<T>({
285  1.0f, 2.0f, 5.0f, 6.0f,
286  3.0f, 4.0f, 7.0f, 8.0f,
287  9.0f, 10.0f, 13.0f, 14.0f,
288  11.0f, 12.0f, 15.0f, 16.0f,
289 
290  17.0f, 18.0f, 21.0f, 22.0f,
291  19.0f, 20.0f, 23.0f, 24.0f,
292  25.0f, 26.0f, 29.0f, 30.0f,
293  27.0f, 28.0f, 31.0f, 32.0f,
294  },
295  qScale, qOffset));
296 
297  std::vector<T> outputData(
298  QuantizedVector<T>({
299  4.0f, 8.0f,
300  12.0f, 16.0f,
301 
302  20.0f, 24.0f,
303  28.0f, 32.0f,
304  },
305  qScale, qOffset));
306 
307  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
308  if (dataLayout == armnn::DataLayout::NHWC)
309  {
310  std::vector<T> tmp(inputData.size());
311  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
312  inputData = tmp;
313 
314  std::vector<T> tmp1(outputData.size());
315  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
316  outputData = tmp1;
317  }
318 
319  return SimplePooling2dTestImpl<ArmnnType>(
320  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
321  inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
322 }
323 
324 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
325 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
326  armnn::IWorkloadFactory& workloadFactory,
328  const armnn::ITensorHandleFactory& tensorHandleFactory,
330  float qScale = 1.0f,
331  int32_t qOffset = 0)
332 {
333  armnn::Pooling2dDescriptor descriptor;
335  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
336  descriptor.m_StrideX = descriptor.m_StrideY = 2;
338  descriptor.m_DataLayout = dataLayout;
339 
340  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
341  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
342 
343  // Set quantization parameters if the requested type is a quantized type.
344  if(armnn::IsQuantizedType<T>())
345  {
346  inputTensorInfo.SetQuantizationScale(qScale);
347  inputTensorInfo.SetQuantizationOffset(qOffset);
348  outputTensorInfo.SetQuantizationScale(qScale);
349  outputTensorInfo.SetQuantizationOffset(qOffset);
350  }
351 
352  std::vector<T> inputData(
353  QuantizedVector<T>({
354  2.0f, 2.0f, 6.0f, 6.0f,
355  4.0f, 4.0f, 8.0f, 8.0f,
356  10.0f, 12.0f, 14.0f, 16.0f,
357  10.0f, 12.0f, 16.0f, 14.0f,
358 
359  18.0f, 20.0f, 24.0f, 22.0f,
360  20.0f, 18.0f, 22.0f, 24.0f,
361  26.0f, 28.0f, 0.0f, 0.0f,
362  26.0f, 28.0f, 0.0f, 0.0f,
363  },
364  qScale, qOffset));
365 
366  std::vector<T> outputData(
367  QuantizedVector<T>({
368  3.0f, 7.0f,
369  11.0f, 15.0f,
370 
371  19.0f, 23.0f,
372  27.0f, 0.0f,
373  },
374  qScale, qOffset));
375 
376  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
377  if (dataLayout == armnn::DataLayout::NHWC)
378  {
379  std::vector<T> tmp(inputData.size());
380  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
381  inputData = tmp;
382 
383  std::vector<T> tmp1(outputData.size());
384  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
385  outputData = tmp1;
386  }
387 
388  return SimplePooling2dTestImpl<ArmnnType>(
389  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
390  inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
391 }
392 
393 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
394 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
395  armnn::IWorkloadFactory& workloadFactory,
397  const armnn::ITensorHandleFactory& tensorHandleFactory,
398  float qScale = 1.0f,
399  int32_t qOffset = 0)
400 {
401  armnn::Pooling2dDescriptor descriptor;
403  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
404  descriptor.m_StrideX = descriptor.m_StrideY = 5;
405  descriptor.m_PadLeft = 50;
406  descriptor.m_PadRight = 50;
407  descriptor.m_PadTop = 50;
408  descriptor.m_PadBottom = 50;
410 
411  armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
412  armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
413 
414  // Set quantization parameters if the requested type is a quantized type.
415  if(armnn::IsQuantizedType<T>())
416  {
417  inputTensorInfo.SetQuantizationScale(qScale);
418  inputTensorInfo.SetQuantizationOffset(qOffset);
419  outputTensorInfo.SetQuantizationScale(qScale);
420  outputTensorInfo.SetQuantizationOffset(qOffset);
421  }
422 
423  std::vector<T> input;
424 
425  for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
426  {
427  input.push_back(1);
428  }
429 
430  std::vector<T> outputExpected;
431 
432  for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
433  {
434  outputExpected.push_back(1);
435  }
436 
437  return SimplePooling2dTestImpl<ArmnnType>(
438  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
439  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
440 }
441 
442 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
443 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
444  armnn::IWorkloadFactory& workloadFactory,
446  const armnn::ITensorHandleFactory& tensorHandleFactory,
448  float qScale = 1.0f,
449  int32_t qOffset = 0)
450 {
451  armnn::Pooling2dDescriptor descriptor;
453  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
454  descriptor.m_StrideX = descriptor.m_StrideY = 2;
456  descriptor.m_DataLayout = dataLayout;
457 
458  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
459  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
460 
461  std::vector<T> inputData(
462  QuantizedVector<T>({
463  1.0f, 7.0f, 5.0f, 5.0f,
464  1.0f, 7.0f, 5.0f, 5.0f,
465  3.0f, 3.0f, 1.0f, 1.0f,
466  3.0f, 3.0f, 1.0f, 1.0f,
467 
468  1.0f, 7.0f, 0.0f, 0.0f,
469  1.0f, 7.0f, 2.0f, 0.0f,
470  0.0f, 2.0f, 1.0f, 1.0f,
471  0.0f, 0.0f, 1.0f, 1.0f,
472  },
473  qScale, qOffset));
474 
475  std::vector<T> outputData(
476  QuantizedVector<T>({
477  5.0f, 5.0f,
478  3.0f, 1.0f,
479 
480  5.0f, 1.0f,
481  1.0f, 1.0f,
482  },
483  qScale, qOffset));
484 
485  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
486  if (dataLayout == armnn::DataLayout::NHWC)
487  {
488  std::vector<T> tmp(inputData.size());
489  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
490  inputData = tmp;
491 
492  std::vector<T> tmp1(outputData.size());
493  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
494  outputData = tmp1;
495  }
496 
497  return SimplePooling2dTestImpl<ArmnnType>(
498  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
499  inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
500 }
501 
502 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
503 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
504  armnn::IWorkloadFactory& workloadFactory,
506  const armnn::ITensorHandleFactory& tensorHandleFactory,
507  float qScale = 1.0f,
508  int32_t qOffset = 0)
509 {
510  armnn::Pooling2dDescriptor descriptor;
512  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
513  descriptor.m_StrideX = descriptor.m_StrideY = 1;
515 
516  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
517  auto input = QuantizedVector<T>(
518  {
519  2.0f, 1.0f, 5.0f, 2.0f,
520  1.0f, 2.0f, 2.0f, 1.0f,
521  5.0f, 4.0f, 1.0f, 5.0f,
522  2.0f, 1.0f, 5.0f, 2.0f,
523  },
524  qScale, qOffset);
525 
526  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
527  auto outputExpected = QuantizedVector<T>(
528  {
529  3.0f, 3.0f,
530  3.0f, 3.0f,
531  },
532  qScale, qOffset);
533 
534  return SimplePooling2dTestImpl<ArmnnType>(
535  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
536  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
537 }
538 
539 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
540 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
541  armnn::IWorkloadFactory& workloadFactory,
543  const armnn::ITensorHandleFactory& tensorHandleFactory,
544  float qScale = 1.0f,
545  int32_t qOffset = 0)
546 {
547  armnn::Pooling2dDescriptor descriptor;
549  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
550  descriptor.m_StrideX = descriptor.m_StrideY = 3;
552 
553  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
554  auto input = QuantizedVector<T>(
555  {
556  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
557  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
558  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
559  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
560  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
561  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
562  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
563  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
564  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
565  },
566  qScale, qOffset);
567 
568  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
569  auto outputExpected = QuantizedVector<T>(
570  {
571  3.0f, 3.0f, 3.0f,
572  3.0f, 3.0f, 3.0f,
573  3.0f, 3.0f, 3.0f,
574  },
575  qScale, qOffset);
576 
577  return SimplePooling2dTestImpl<ArmnnType>(
578  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
579  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
580 }
581 
582 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
583 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
584  armnn::IWorkloadFactory& workloadFactory,
586  const armnn::ITensorHandleFactory& tensorHandleFactory,
587  float qScale = 1.0f,
588  int32_t qOffset = 0)
589 {
590  armnn::Pooling2dDescriptor descriptor;
592  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
593  descriptor.m_StrideX = descriptor.m_StrideY = 4;
595 
596  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
597  auto input = QuantizedVector<T>(
598  {
599  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
602  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
603  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
604  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
605  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
606  },
607  qScale, qOffset);
608 
609  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
610  auto outputExpected = QuantizedVector<T>(
611  {
612  3.0f, 3.0f,
613  3.0f, 3.0f,
614  },
615  qScale, qOffset);
616 
617  return SimplePooling2dTestImpl<ArmnnType>(
618  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
619  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
620 }
621 
622 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
623 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
624  armnn::IWorkloadFactory& workloadFactory,
626  const armnn::ITensorHandleFactory& tensorHandleFactory,
627  float qScale = 1.0f,
628  int32_t qOffset = 0)
629 {
630  armnn::Pooling2dDescriptor descriptor;
632  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
633  descriptor.m_StrideX = descriptor.m_StrideY = 7;
635 
636  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
637  auto input = QuantizedVector<T>(
638  {
639  1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
640  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
641  0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
642  8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
643  0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
644  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
645  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646  },
647  qScale, qOffset);
648 
649  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
650  auto outputExpected = QuantizedVector<T>(
651  {
652  3.0f,
653  },
654  qScale, qOffset);
655 
656  return SimplePooling2dTestImpl<ArmnnType>(
657  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
658  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
659 }
660 
661 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
662 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
663  armnn::IWorkloadFactory& workloadFactory,
665  const armnn::ITensorHandleFactory& tensorHandleFactory,
666  float qScale = 1.0f,
667  int32_t qOffset = 0)
668 {
669  armnn::Pooling2dDescriptor descriptor;
671  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
672  descriptor.m_StrideX = descriptor.m_StrideY = 9;
674 
675  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
676  auto input = QuantizedVector<T>(
677  {
678  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
679  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
680  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
681  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
682  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
683  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
684  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
685  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
686  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
687  },
688  qScale, qOffset);
689 
690  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
691  auto outputExpected = QuantizedVector<T>(
692  {
693  3.0f,
694  },
695  qScale, qOffset);
696 
697  return SimplePooling2dTestImpl<ArmnnType>(
698  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
699  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
700 }
701 
702 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
703 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
704  armnn::IWorkloadFactory& workloadFactory,
706  const armnn::ITensorHandleFactory& tensorHandleFactory,
707  float qScale = 1.0f,
708  int32_t qOffset = 0)
709 {
710  armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
711  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
712 
713  armnn::Pooling2dDescriptor descriptor;
715  descriptor.m_PoolWidth = 2;
716  descriptor.m_PoolHeight = 3;
717  descriptor.m_StrideX = 2;
718  descriptor.m_StrideY = 1;
719  descriptor.m_PadLeft = 2;
720  descriptor.m_PadRight = 0;
721  descriptor.m_PadTop = 1;
722  descriptor.m_PadBottom = 2;
723  descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
724  descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
725 
726  // Construct input data.
727  auto input = QuantizedVector<T>(
728  {
729  1.0f, 3.0f, 4.0f,
730  },
731  qScale, qOffset);
732 
733  // These were calculated manually.
734  auto outputExpected = QuantizedVector<T>(
735  {
736  0.0f, 3.0f, 0.0f, 3.0f,
737  },
738  qScale, qOffset);
739 
740  return SimplePooling2dTestImpl<ArmnnType>(
741  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
742  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
743 }
744 
745 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
746 LayerTestResult<T, 4> ComparePooling2dTestCommon(
747  armnn::IWorkloadFactory& workloadFactory,
749  armnn::IWorkloadFactory& refWorkloadFactory,
750  const armnn::ITensorHandleFactory& tensorHandleFactory,
751  const armnn::ITensorHandleFactory& refTensorHandleFactory,
752  armnn::PoolingAlgorithm poolingType,
753  float qScale = 1.0f,
754  int32_t qOffset = 0)
755 {
756  IgnoreUnused(memoryManager);
757  const unsigned int inputWidth = 16;
758  const unsigned int inputHeight = 32;
759  const unsigned int channelCount = 2;
760  const unsigned int batchSize = 5;
761 
762  const unsigned int poolSize = 3;
763  const unsigned int strideX = 2;
764  const unsigned int strideY = 4;
765  const unsigned int padX = 0;
766  const unsigned int padY = 0;
767 
768  const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
769  const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
770 
771  armnn::TensorInfo inputTensorInfo;
772  armnn::TensorInfo outputTensorInfo;
773 
774  unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
775  unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
776 
777  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
778  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
779 
780  // Set quantization parameters if the requested type is a quantized type.
781  if(armnn::IsQuantizedType<T>())
782  {
783  inputTensorInfo.SetQuantizationScale(qScale);
784  inputTensorInfo.SetQuantizationOffset(qOffset);
785  outputTensorInfo.SetQuantizationScale(qScale);
786  outputTensorInfo.SetQuantizationOffset(qOffset);
787  }
788 
789  std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 81715);
790  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
791  std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
792 
793  LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
794 
795  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
796  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
797 
800  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
801  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
802  data.m_Parameters.m_PoolType = poolingType;
803  data.m_Parameters.m_PoolWidth = poolSize;
804  data.m_Parameters.m_PoolHeight = poolSize;
805  data.m_Parameters.m_StrideX = strideX;
806  data.m_Parameters.m_StrideY = strideY;
807  data.m_Parameters.m_PadLeft = padX;
808  data.m_Parameters.m_PadRight = padX;
809  data.m_Parameters.m_PadTop = padY;
810  data.m_Parameters.m_PadBottom = padY;
812 
813  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
814  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
815 
816  // Don't execute if Pooling is not supported, as an exception will be raised.
817  armnn::BackendId backend = workloadFactory.GetBackendId();
818  const size_t reasonIfUnsupportedMaxLen = 255;
819  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
820  comparisonResult.m_Supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
821  data.m_Parameters,
822  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
823  if (!comparisonResult.m_Supported)
824  {
825  return comparisonResult;
826  }
827 
828  armnn::Pooling2dQueueDescriptor refData = data;
829  armnn::WorkloadInfo refInfo = info;
830  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
831  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
832 
833  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
834  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
835 
836  outputHandleRef->Allocate();
837  inputHandleRef->Allocate();
838  inputHandle->Allocate();
839  outputHandle->Allocate();
840 
841  CopyDataToITensorHandle(inputHandle.get(), input.data());
842  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
843 
844  workload->Execute();
845  workloadRef->Execute();
846 
847  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
848  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
849 
850  comparisonResult.m_ActualData = actualOutput;
851  comparisonResult.m_ExpectedData = expectedOutput;
852 
853  return comparisonResult;
854 }
855 
856 //
857 // Tests max pooling with the following parameters:
858 //
859 // Pooling size: 2x2
860 // Stride: (2,2)
861 // input size: 4x4
862 // channels: 1
863 // batch size: 1
864 //
 // Builds and runs a 2x2 max-pooling test over a 1x1x4x4 (NCHW) input with
 // stride (2,2). With forceNoPadding == false, left/right padding of 3 widens
 // the output to 2x5 and the edge columns are 0.0f (the window only sees
 // padded zeros there); with forceNoPadding == true the output is 2x2.
 // qScale/qOffset are applied to both tensors when ArmnnType is quantized.
 // NOTE(review): listing lines 868 (presumably the
 // `const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,`
 // parameter, which the call at the bottom uses), 875 (presumably the
 // `descriptor.m_PoolType` assignment) and 881-882 (presumably output-shape
 // rounding / padding-method assignments) are missing from this extraction --
 // confirm against the original source file.
 865 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 866 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
 867  armnn::IWorkloadFactory& workloadFactory,
 869  const armnn::ITensorHandleFactory& tensorHandleFactory,
 870  bool forceNoPadding,
 871  float qScale = 1.0f,
 872  int32_t qOffset = 0)
 873 {
 874  armnn::Pooling2dDescriptor descriptor;
 876  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
 877  descriptor.m_StrideX = 2;
 878  descriptor.m_StrideY = 2;
 879  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
 880  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
 883 
 884 
 885  unsigned int inputWidth = 4;
 886 
 887  unsigned int inputHeight = 4;
 888 
 // Standard pooling output-size formula: (in + padBefore + padAfter + stride - pool) / stride.
 889  unsigned int outputWidth =
 890  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
 891  descriptor.m_StrideX;
 892  unsigned int outputHeight =
 893  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
 894  descriptor.m_StrideY;
 895  unsigned int channels = 1;
 896  unsigned int batchSize = 1;
 897 
 898  std::vector<float> inputData = {
 899  510.0f, 222.0f, 780.0f, 654.0f,
 900  141.0f, 276.0f, 15.0f, 546.0f,
 901  303.0f, 618.0f, 582.0f, 339.0f,
 902  438.0f, 564.0f, 573.0f, 402.0f
 903  };
 904 
 905  // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
 906  std::vector<float> expectedOutputDataWithPadding = {
 907  0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
 908  0.0f, 438.0f, 618.0f, 402.0f, 0.0f
 909  };
 910 
 911  std::vector<float> expectedOutputDataNoPadding = {
 912  510.0f, 780.0f,
 913  618.0f, 582.0f
 914  };
 915 
 916  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
 917 
 918  // Scale and offset should match input - we're just calculating maximum values.
 919  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
 920 
 921  // Set quantization parameters if the requested type is a quantized type.
 922  if(armnn::IsQuantizedType<T>())
 923  {
 924  inputTensorInfo.SetQuantizationScale(qScale);
 925  inputTensorInfo.SetQuantizationOffset(qOffset);
 926  outputTensorInfo.SetQuantizationScale(qScale);
 927  outputTensorInfo.SetQuantizationOffset(qOffset);
 928  }
 929 
 930  auto input = QuantizedVector<T>(inputData, qScale, qOffset);
 931 
 // Select the expected output matching the padding mode chosen above.
 932  auto outputExpected =
 933  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
 934  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
 935 
 936  return SimplePooling2dTestImpl<ArmnnType>(
 937  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 938  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 939 }
940 
941 //
942 // Tests max pooling with the following parameters:
943 //
944 // Pooling size: 3x2
945 // Stride: (2,2)
946 // input size: 3x2
947 // channels: 1
948 // batch size: 1
949 //
 // Builds and runs a 3x2 average-pooling test over a 1x1x2x3 (NCHW) input with
 // stride (2,2). With forceNoPadding == false, left/right padding of 1 gives a
 // 1x2 output (expected values 6 and 8 -- the padded zeros are counted in the
 // divisor, matching the expected data below); with forceNoPadding == true the
 // single full window averages all six elements to 10.5.
 // NOTE(review): listing lines 953 (presumably the memoryManager parameter used
 // in the call at the bottom), 960 (presumably the `descriptor.m_PoolType`
 // assignment) and 969-970 (presumably rounding / padding-method assignments)
 // are missing from this extraction -- confirm against the original source.
 950 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 951 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
 952  armnn::IWorkloadFactory& workloadFactory,
 954  const armnn::ITensorHandleFactory& tensorHandleFactory,
 955  bool forceNoPadding,
 956  float qScale = 1.0f,
 957  int32_t qOffset = 0)
 958 {
 959  armnn::Pooling2dDescriptor descriptor;
 961  descriptor.m_PoolWidth = 3;
 962  descriptor.m_PoolHeight = 2;
 963  descriptor.m_StrideX = 2;
 964  descriptor.m_StrideY = 2;
 965  descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
 966  descriptor.m_PadRight = descriptor.m_PadLeft;
 967  descriptor.m_PadTop = 0;
 968  descriptor.m_PadBottom = 0;
 971 
 972  unsigned int inputWidth = 3;
 973  unsigned int inputHeight = 2;
 // Standard pooling output-size formula: (in + padBefore + padAfter + stride - pool) / stride.
 974  unsigned int outputWidth =
 975  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
 976  descriptor.m_StrideX;
 977  unsigned int outputHeight =
 978  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
 979  descriptor.m_StrideY;
 980  unsigned int channels = 1;
 981  unsigned int batchSize = 1;
 982 
 983  std::vector<float> inputData = {
 984  3.0f, 6.0f, 9.0f,
 985  12.0f, 15.0f, 18.0f,
 986  };
 987 
 988  std::vector<float> expectedOutputDataWithPadding = {
 989  6.0f, 8.0f,
 990  };
 991 
 992  std::vector<float> expectedOutputDataNoPadding = {
 993  10.5f,
 994  };
 995 
 996  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
 997 
 998  // Scale and offset should match input - we're just calculating average values.
 999  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
 1000 
 1001  // Set quantization parameters if the requested type is a quantized type.
 1002  if(armnn::IsQuantizedType<T>())
 1003  {
 1004  inputTensorInfo.SetQuantizationScale(qScale);
 1005  inputTensorInfo.SetQuantizationOffset(qOffset);
 1006  outputTensorInfo.SetQuantizationScale(qScale);
 1007  outputTensorInfo.SetQuantizationOffset(qOffset);
 1008  }
 1009 
 1010  auto input = QuantizedVector<T>(inputData, qScale, qOffset);
 1011 
 // Select the expected output matching the padding mode chosen above.
 1012  auto outputExpected =
 1013  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
 1014  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
 1015 
 1016  return SimplePooling2dTestImpl<ArmnnType>(
 1017  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1018  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1019 }
1020 
1021 
 // 2x2 max pooling with stride (2,2) and symmetric padding of 1 on a 1x1x4x4
 // (NCHW) input, producing a 1x1x3x3 output with hard-coded expected values.
 // NOTE(review): listing lines 1025 (presumably the memoryManager parameter
 // used in the call at the bottom), 1031 (presumably the pool-type assignment)
 // and 1038 (presumably the padding-method assignment implied by the function
 // name) are missing from this extraction -- confirm against the original.
 1022 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1023 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
 1024  armnn::IWorkloadFactory& workloadFactory,
 1026  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1027  float qScale = 1.0f,
 1028  int32_t qOffset = 0)
 1029 {
 1030  armnn::Pooling2dDescriptor descriptor;
 1032  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
 1033  descriptor.m_StrideX = descriptor.m_StrideY = 2;
 1034  descriptor.m_PadLeft = 1;
 1035  descriptor.m_PadRight = 1;
 1036  descriptor.m_PadTop = 1;
 1037  descriptor.m_PadBottom = 1;
 1039 
 1040  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1041  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
 1042 
 1043  // Set quantization parameters if the requested type is a quantized type.
 1044  if(armnn::IsQuantizedType<T>())
 1045  {
 1046  inputTensorInfo.SetQuantizationScale(qScale);
 1047  inputTensorInfo.SetQuantizationOffset(qOffset);
 1048  outputTensorInfo.SetQuantizationScale(qScale);
 1049  outputTensorInfo.SetQuantizationOffset(qOffset);
 1050  }
 1051 
 1052  auto input = QuantizedVector<T>(
 1053  {
 1054  -1.0f, -2.0f, 3.0f, 4.0f,
 1055  -1.0f, -2.0f, 3.0f, 4.0f,
 1056  1.0f, 2.0f, -3.0f, -4.0f,
 1057  1.0f, 2.0f, -3.0f, -4.0f,
 1058  },
 1059  qScale, qOffset);
 1060 
 1061  auto outputExpected = QuantizedVector<T>(
 1062  {
 1063  -1.0f, 3.0f, 4.0f,
 1064  1.0f, 3.0f, 4.0f,
 1065  1.0f, 2.0f, -4.0f,
 1066  },
 1067  qScale, qOffset);
 1068 
 1069  return SimplePooling2dTestImpl<ArmnnType>(
 1070  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1071  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1072 }
1073 
 // 3x3 max pooling with stride (1,1) and symmetric padding of 1 on a 1x1x4x4
 // (NCHW) input -- a "same"-size case producing a 1x1x4x4 output.
 // NOTE(review): listing lines 1077 (presumably the memoryManager parameter
 // used in the call at the bottom), 1083 (presumably the pool-type assignment)
 // and 1090 (presumably the padding-method assignment) are missing from this
 // extraction -- confirm against the original source.
 1074 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1075 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
 1076  armnn::IWorkloadFactory& workloadFactory,
 1078  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1079  float qScale = 1.0f,
 1080  int32_t qOffset = 0)
 1081 {
 1082  armnn::Pooling2dDescriptor descriptor;
 1084  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
 1085  descriptor.m_StrideX = descriptor.m_StrideY = 1;
 1086  descriptor.m_PadLeft = 1;
 1087  descriptor.m_PadRight = 1;
 1088  descriptor.m_PadTop = 1;
 1089  descriptor.m_PadBottom = 1;
 1091 
 1092  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1093  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1094 
 1095  // Set quantization parameters if the requested type is a quantized type.
 1096  if(armnn::IsQuantizedType<T>())
 1097  {
 1098  inputTensorInfo.SetQuantizationScale(qScale);
 1099  inputTensorInfo.SetQuantizationOffset(qOffset);
 1100  outputTensorInfo.SetQuantizationScale(qScale);
 1101  outputTensorInfo.SetQuantizationOffset(qOffset);
 1102  }
 1103 
 1104  auto input = QuantizedVector<T>(
 1105  {
 1106  -1.0f, -2.0f, 3.0f, 4.0f,
 1107  -1.0f, -2.0f, 3.0f, 4.0f,
 1108  1.0f, 2.0f, -3.0f, -4.0f,
 1109  1.0f, 2.0f, -3.0f, -4.0f,
 1110  },
 1111  qScale, qOffset);
 1112 
 1113  auto outputExpected = QuantizedVector<T>(
 1114  {
 1115  -1.0f, 3.0f, 4.0f, 4.0f,
 1116  2.0f, 3.0f, 4.0f, 4.0f,
 1117  2.0f, 3.0f, 4.0f, 4.0f,
 1118  2.0f, 2.0f, 2.0f, -3.0f,
 1119  },
 1120  qScale, qOffset);
 1121 
 1122  return SimplePooling2dTestImpl<ArmnnType>(
 1123  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1124  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1125 }
1126 
 // 2x2 average pooling with stride (2,2) and symmetric padding of 1 on a
 // 1x1x4x4 (NCHW) input, producing a 1x1x3x3 output. The expected values show
 // the padded zeros contributing to the sums (e.g. corner 12/4 = 3.0f).
 // NOTE(review): listing lines 1130 (presumably the memoryManager parameter
 // used in the call at the bottom), 1136 (presumably the pool-type assignment)
 // and 1143 (presumably the padding-method assignment) are missing from this
 // extraction -- confirm against the original source.
 1127 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1128 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
 1129  armnn::IWorkloadFactory& workloadFactory,
 1131  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1132  float qScale = 1.0f,
 1133  int32_t qOffset = 0)
 1134 {
 1135  armnn::Pooling2dDescriptor descriptor;
 1137  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
 1138  descriptor.m_StrideX = descriptor.m_StrideY = 2;
 1139  descriptor.m_PadLeft = 1;
 1140  descriptor.m_PadRight = 1;
 1141  descriptor.m_PadTop = 1;
 1142  descriptor.m_PadBottom = 1;
 1144 
 1145  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1146  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
 1147 
 1148  // Set quantization parameters if the requested type is a quantized type.
 1149  if(armnn::IsQuantizedType<T>())
 1150  {
 1151  inputTensorInfo.SetQuantizationScale(qScale);
 1152  inputTensorInfo.SetQuantizationOffset(qOffset);
 1153  outputTensorInfo.SetQuantizationScale(qScale);
 1154  outputTensorInfo.SetQuantizationOffset(qOffset);
 1155  }
 1156 
 1157  auto input = QuantizedVector<T>(
 1158  {
 1159  12.0f, 20.0f, 32.0f, 40.0f,
 1160  12.0f, 20.0f, 32.0f, 40.0f,
 1161  12.0f, 20.0f, 32.0f, 40.0f,
 1162  12.0f, 20.0f, 32.0f, 40.0f,
 1163  },
 1164  qScale, qOffset);
 1165 
 1166  auto outputExpected = QuantizedVector<T>(
 1167  {
 1168  3.0f, 13.0f, 10.0f,
 1169  6.0f, 26.0f, 20.0f,
 1170  3.0f, 13.0f, 10.0f,
 1171  },
 1172  qScale, qOffset);
 1173 
 1174  return SimplePooling2dTestImpl<ArmnnType>(
 1175  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1176  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1177 }
1178 
 // 3x3 average pooling with stride (2,2) and NO padding on a 1x1x4x4 (NCHW)
 // input. The declared output is 1x1x2x2, which implies ceiling output-shape
 // rounding (floor would give 1x1); the partial edge windows average only the
 // elements they cover (e.g. 3.5f = (3+4)*3 / 6).
 // NOTE(review): listing lines 1182 (presumably the memoryManager parameter
 // used in the call at the bottom), 1188 (presumably the pool-type assignment)
 // and 1195-1196 (presumably the rounding / padding-method assignments that
 // would confirm the ceiling-rounding inference above) are missing from this
 // extraction -- confirm against the original source.
 1179 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1180 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
 1181  armnn::IWorkloadFactory& workloadFactory,
 1183  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1184  float qScale = 1.0f,
 1185  int32_t qOffset = 0)
 1186 {
 1187  armnn::Pooling2dDescriptor descriptor;
 1189  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
 1190  descriptor.m_StrideX = descriptor.m_StrideY = 2;
 1191  descriptor.m_PadLeft = 0;
 1192  descriptor.m_PadRight = 0;
 1193  descriptor.m_PadTop = 0;
 1194  descriptor.m_PadBottom = 0;
 1197 
 1198  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
 1199  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
 1200 
 1201  // Set quantization parameters if the requested type is a quantized type.
 1202  if(armnn::IsQuantizedType<T>())
 1203  {
 1204  inputTensorInfo.SetQuantizationScale(qScale);
 1205  inputTensorInfo.SetQuantizationOffset(qOffset);
 1206  outputTensorInfo.SetQuantizationScale(qScale);
 1207  outputTensorInfo.SetQuantizationOffset(qOffset);
 1208  }
 1209 
 1210  auto input = QuantizedVector<T>(
 1211  {
 1212  1.0f, 2.0f, 3.0f, 4.0f,
 1213  1.0f, 2.0f, 3.0f, 4.0f,
 1214  1.0f, 2.0f, 3.0f, 4.0f,
 1215  1.0f, 2.0f, 3.0f, 4.0f,
 1216  },
 1217  qScale, qOffset);
 1218 
 1219  auto outputExpected = QuantizedVector<T>(
 1220  {
 1221  2.0f, 3.5f,
 1222  2.0f, 3.5f
 1223  },
 1224  qScale, qOffset);
 1225 
 1226  return SimplePooling2dTestImpl<ArmnnType>(
 1227  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1228  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1229 }
1230 
 // 3x3 average pooling with stride (1,1) and symmetric padding of 1 on a
 // 1x1x4x4 (NCHW) input -- a "same"-size case producing a 1x1x4x4 output.
 // NOTE(review): listing lines 1234 (presumably the memoryManager parameter
 // used in the call at the bottom), 1240 (presumably the pool-type assignment)
 // and 1247 (presumably the padding-method assignment) are missing from this
 // extraction -- confirm against the original source.
 1231 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1232 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
 1233  armnn::IWorkloadFactory& workloadFactory,
 1235  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1236  float qScale = 1.0f,
 1237  int32_t qOffset = 0)
 1238 {
 1239  armnn::Pooling2dDescriptor descriptor;
 1241  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
 1242  descriptor.m_StrideX = descriptor.m_StrideY = 1;
 1243  descriptor.m_PadLeft = 1;
 1244  descriptor.m_PadRight = 1;
 1245  descriptor.m_PadTop = 1;
 1246  descriptor.m_PadBottom = 1;
 1248 
 1249  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1250  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1251 
 1252  // Set quantization parameters if the requested type is a quantized type.
 1253  if(armnn::IsQuantizedType<T>())
 1254  {
 1255  inputTensorInfo.SetQuantizationScale(qScale);
 1256  inputTensorInfo.SetQuantizationOffset(qOffset);
 1257  outputTensorInfo.SetQuantizationScale(qScale);
 1258  outputTensorInfo.SetQuantizationOffset(qOffset);
 1259  }
 1260 
 1261  auto input = QuantizedVector<T>(
 1262  {
 1263  9.0f, 27.0f, 18.0f, 36.0f,
 1264  18.0f, 9.0f, 18.0f, 9.0f,
 1265  27.0f, 18.0f, 9.0f, 27.0f,
 1266  9.0f, 27.0f, 9.0f, 18.0f,
 1267  },
 1268  qScale, qOffset);
 1269 
 1270  auto outputExpected = QuantizedVector<T>(
 1271  {
 1272  7.0f, 11.0f, 13.0f, 9.0f,
 1273  12.0f, 17.0f, 19.0f, 13.0f,
 1274  12.0f, 16.0f, 16.0f, 10.0f,
 1275  9.0f, 11.0f, 12.0f, 7.0f,
 1276  },
 1277  qScale, qOffset);
 1278 
 1279  return SimplePooling2dTestImpl<ArmnnType>(
 1280  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1281  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1282 }
1283 
 // 2x2 L2 pooling (root-mean-square over the window) with stride (2,2) and
 // symmetric padding of 1 on a 1x1x4x4 (NCHW) input, producing a 1x1x3x3
 // output. The padded zeros enter the mean (e.g. corner: sqrt(2^2 / 4) = 1.0f).
 // NOTE(review): listing lines 1287 (presumably the memoryManager parameter
 // used in the call at the bottom), 1293 (presumably the pool-type assignment)
 // and 1300 (presumably the padding-method assignment) are missing from this
 // extraction -- confirm against the original source.
 1284 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1285 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
 1286  armnn::IWorkloadFactory& workloadFactory,
 1288  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1289  float qScale = 1.0f,
 1290  int32_t qOffset = 0)
 1291 {
 1292  armnn::Pooling2dDescriptor descriptor;
 1294  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
 1295  descriptor.m_StrideX = descriptor.m_StrideY = 2;
 1296  descriptor.m_PadLeft = 1;
 1297  descriptor.m_PadRight = 1;
 1298  descriptor.m_PadTop = 1;
 1299  descriptor.m_PadBottom = 1;
 1301 
 1302  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1303  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
 1304 
 1305  // Set quantization parameters if the requested type is a quantized type.
 1306  if(armnn::IsQuantizedType<T>())
 1307  {
 1308  inputTensorInfo.SetQuantizationScale(qScale);
 1309  inputTensorInfo.SetQuantizationOffset(qOffset);
 1310  outputTensorInfo.SetQuantizationScale(qScale);
 1311  outputTensorInfo.SetQuantizationOffset(qOffset);
 1312  }
 1313 
 1314  auto input = QuantizedVector<T>(
 1315  {
 1316  2.0f, 4.0f, 8.0f, 16.0f,
 1317  4.0f, 2.0f, 2.0f, 4.0f,
 1318  8.0f, 2.0f, 4.0f, 2.0f,
 1319  16.0f, 2.0f, 2.0f, 8.0f,
 1320  },
 1321  qScale, qOffset);
 1322 
 // Expected values are truncated decimal approximations (e.g. 4.4721 ~ sqrt(20)).
 1323  auto outputExpected = QuantizedVector<T>(
 1324  {
 1325  1.0f, 4.4721f, 8.0f,
 1326  4.4721f, 2.6457f, 2.236f,
 1327  8.0f, 1.4142f, 4.0f,
 1328  },
 1329  qScale, qOffset);
 1330 
 1331  return SimplePooling2dTestImpl<ArmnnType>(
 1332  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1333  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1334 }
1335 
 // 3x3 L2 pooling with stride (1,1) and symmetric padding of 1 on a 1x1x4x4
 // (NCHW) input -- a "same"-size case producing a 1x1x4x4 output with
 // truncated-decimal expected values.
 // NOTE(review): listing lines 1339 (presumably the memoryManager parameter
 // used in the call at the bottom), 1345 (presumably the pool-type assignment)
 // and 1352 (presumably the padding-method assignment) are missing from this
 // extraction -- confirm against the original source.
 1336 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 1337 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
 1338  armnn::IWorkloadFactory& workloadFactory,
 1340  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1341  float qScale = 1.0f,
 1342  int32_t qOffset = 0)
 1343 {
 1344  armnn::Pooling2dDescriptor descriptor;
 1346  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
 1347  descriptor.m_StrideX = descriptor.m_StrideY = 1;
 1348  descriptor.m_PadLeft = 1;
 1349  descriptor.m_PadRight = 1;
 1350  descriptor.m_PadTop = 1;
 1351  descriptor.m_PadBottom = 1;
 1353 
 1354  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1355  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
 1356 
 1357  // Set quantization parameters if the requested type is a quantized type.
 1358  if(armnn::IsQuantizedType<T>())
 1359  {
 1360  inputTensorInfo.SetQuantizationScale(qScale);
 1361  inputTensorInfo.SetQuantizationOffset(qOffset);
 1362  outputTensorInfo.SetQuantizationScale(qScale);
 1363  outputTensorInfo.SetQuantizationOffset(qOffset);
 1364  }
 1365 
 1366  auto input = QuantizedVector<T>(
 1367  {
 1368  1.0f, 2.0f, 3.0f, 4.0f,
 1369  1.0f, 2.0f, 3.0f, 4.0f,
 1370  1.0f, 2.0f, 3.0f, 4.0f,
 1371  1.0f, 2.0f, 3.0f, 4.0f,
 1372  },
 1373  qScale, qOffset);
 1374 
 1375  auto outputExpected = QuantizedVector<T>(
 1376  {
 1377  1.0540f, 1.7638f, 2.5385f, 2.3570f,
 1378  1.2909f, 2.1602f, 3.1091f, 2.8867f,
 1379  1.2909f, 2.1602f, 3.1091f, 2.8867f,
 1380  1.0540f, 1.7638f, 2.5385f, 2.3570f,
 1381  },
 1382  qScale, qOffset);
 1383 
 1384  return SimplePooling2dTestImpl<ArmnnType>(
 1385  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
 1386  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
 1387 }
1388 
1389 } // anonymous namespace
1390 
 // Public entry points instantiating the max-pooling *TestCommon templates
 // above for Float32, QAsymmU8 and QSymmS16. Quantized variants may pass
 // explicit qScale/qOffset trailing arguments (visible in the calls below).
 // NOTE(review): each function's leading line (return type and name, e.g.
 // listing lines 1391, 1401, 1411, ...) was a hyperlink in the doxygen listing
 // and is missing from this extraction; names can be cross-checked against the
 // doxygen reference text at the bottom of the file (e.g.
 // SimpleMaxPooling2dSize2x2Stride2x2Uint8Test) -- confirm against the
 // original source file.
 1392  armnn::IWorkloadFactory& workloadFactory,
 1394  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1395  bool forceNoPadding)
 1396 {
 1397  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
 1398  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
 1399 }
 1400 
 1402  armnn::IWorkloadFactory& workloadFactory,
 1404  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1405  bool forceNoPadding)
 1406 {
 1407  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
 1408  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
 1409 }
 1410 
 1412  armnn::IWorkloadFactory& workloadFactory,
 1414  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1415  bool forceNoPadding)
 1416 {
 1417  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
 1418  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
 1419 }
 1420 
 // NOTE(review): SimpleMaxPooling2dSize3x3Stride2x4TestCommon is not visible in
 // this chunk; it presumably lives earlier in the file.
 1422  armnn::IWorkloadFactory& workloadFactory,
 1424  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1425  bool forceNoPadding)
 1426 {
 1427  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
 1428  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
 1429 }
 1430 
 1432  armnn::IWorkloadFactory& workloadFactory,
 1434  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1435  bool forceNoPadding)
 1436 {
 1437  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
 1438  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
 1439 }
 1440 
 1442  armnn::IWorkloadFactory& workloadFactory,
 1444  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1445  bool forceNoPadding)
 1446 {
 1447  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
 1448  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
 1449 }
 1450 
 // The dataLayout-taking wrappers forward NCHW/NHWC selection to the template.
 1452  armnn::IWorkloadFactory& workloadFactory,
 1454  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1455  const armnn::DataLayout dataLayout)
 1456 {
 1457  return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
 1458  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1459 }
 1460 
 1462  armnn::IWorkloadFactory& workloadFactory,
 1464  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1465  const armnn::DataLayout dataLayout)
 1466 {
 1467  return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1468  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1469 }
 1470 
 1472  armnn::IWorkloadFactory& workloadFactory,
 1474  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1475  const armnn::DataLayout dataLayout)
 1476 {
 1477  return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
 1478  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1479 }
 1481  armnn::IWorkloadFactory& workloadFactory,
 1483  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1484 {
 1485  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
 1486  workloadFactory, memoryManager, tensorHandleFactory);
 1487 }
 1488 
 1490  armnn::IWorkloadFactory& workloadFactory,
 1492  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1493 {
 1494  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1495  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
 1496 }
 1497 
 1499  armnn::IWorkloadFactory& workloadFactory,
 1501  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1502 {
 1503  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
 1504  workloadFactory, memoryManager, tensorHandleFactory);
 1505 }
 1506 
 1508  armnn::IWorkloadFactory& workloadFactory,
 1510  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1511 {
 1512  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
 1513  workloadFactory, memoryManager, tensorHandleFactory);
 1514 }
 1515 
 1517  armnn::IWorkloadFactory& workloadFactory,
 1519  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1520 {
 1521  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
 1522  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
 1523 }
 1524 
 1526  armnn::IWorkloadFactory& workloadFactory,
 1528  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1529 {
 1530  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
 1531  workloadFactory, memoryManager, tensorHandleFactory);
 1532 }
1533 
 // Public entry points instantiating the average-pooling *TestCommon templates
 // for Float32, QAsymmU8 and QSymmS16 (quantized variants may pass explicit
 // qScale/qOffset, e.g. 0.5, -1 below).
 // NOTE(review): each function's leading line (return type and name) was a
 // hyperlink in the doxygen listing and is missing from this extraction.
 // SimpleAveragePooling2dTestCommon and LargeTensorsAveragePooling2dTestCommon
 // are also not visible in this chunk -- confirm against the original source.
 1535  armnn::IWorkloadFactory& workloadFactory,
 1537  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1538  const armnn::DataLayout dataLayout)
 1539 {
 1540  return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
 1541  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1542 }
 1543 
 1545  armnn::IWorkloadFactory& workloadFactory,
 1547  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1548  const armnn::DataLayout dataLayout)
 1549 {
 1550  return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1551  workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
 1552 }
 1553 
 1555  armnn::IWorkloadFactory& workloadFactory,
 1557  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1558  const armnn::DataLayout dataLayout)
 1559 {
 1560  return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
 1561  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1562 }
 1563 
 1565  armnn::IWorkloadFactory& workloadFactory,
 1567  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1568  bool forceNoPadding)
 1569 {
 1570  return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
 1571  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
 1572 }
 1573 
 1575  armnn::IWorkloadFactory& workloadFactory,
 1577  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1578 {
 1579  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
 1580  workloadFactory, memoryManager, tensorHandleFactory);
 1581 }
 1582 
 1584  armnn::IWorkloadFactory& workloadFactory,
 1586  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1587 {
 1588  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1589  workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
 1590 }
 1591 
 1593  armnn::IWorkloadFactory& workloadFactory,
 1595  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1596 {
 1597  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
 1598  workloadFactory, memoryManager, tensorHandleFactory);
 1599 }
 1601  armnn::IWorkloadFactory& workloadFactory,
 1603  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1604 {
 1605  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
 1606  workloadFactory, memoryManager, tensorHandleFactory);
 1607 }
 1608 
 1610  armnn::IWorkloadFactory& workloadFactory,
 1612  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1613 {
 1614  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1615  workloadFactory, memoryManager, tensorHandleFactory);
 1616 }
 1617 
 1619  armnn::IWorkloadFactory& workloadFactory,
 1621  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1622 {
 1623  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
 1624  workloadFactory, memoryManager, tensorHandleFactory);
 1625 }
 1626 
 1628  armnn::IWorkloadFactory& workloadFactory,
 1630  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1631 {
 1632  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
 1633  workloadFactory, memoryManager, tensorHandleFactory);
 1634 }
 1635 
 1637  armnn::IWorkloadFactory& workloadFactory,
 1639  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1640 {
 1641  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
 1642  workloadFactory, memoryManager, tensorHandleFactory);
 1643 }
 1644 
 1646  armnn::IWorkloadFactory& workloadFactory,
 1648  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1649 {
 1650  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
 1651  workloadFactory, memoryManager, tensorHandleFactory);
 1652 }
 1653 
 1655  armnn::IWorkloadFactory& workloadFactory,
 1657  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1658 {
 1659  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
 1660  workloadFactory, memoryManager, tensorHandleFactory);
 1661 }
 1662 
 1664  armnn::IWorkloadFactory& workloadFactory,
 1666  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1667 {
 1668  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
 1669  workloadFactory, memoryManager, tensorHandleFactory);
 1670 }
 1671 
 1673  armnn::IWorkloadFactory& workloadFactory,
 1675  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1676 {
 1677  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
 1678  workloadFactory, memoryManager, tensorHandleFactory);
 1679 }
1680 
 // Public entry points instantiating the L2-pooling *TestCommon templates for
 // Float32, QAsymmU8 and QSymmS16.
 // NOTE(review): each function's leading line (return type and name) was a
 // hyperlink in the doxygen listing and is missing from this extraction; the
 // L2Pooling2dSize3Stride{1,3,4}/Size7/Size9 TestCommon templates referenced
 // below are also not visible in this chunk -- confirm against the original.
 1682  armnn::IWorkloadFactory& workloadFactory,
 1684  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1685  const armnn::DataLayout dataLayout)
 1686 {
 1687  return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
 1688  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1689 }
 1690 
 1692  armnn::IWorkloadFactory& workloadFactory,
 1694  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1695  const armnn::DataLayout dataLayout)
 1696 {
 1697  return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1698  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1699 }
 1700 
 1702  armnn::IWorkloadFactory& workloadFactory,
 1704  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1705  const armnn::DataLayout dataLayout)
 1706 {
 1707  return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
 1708  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 1709 }
 1710 
 1712  armnn::IWorkloadFactory& workloadFactory,
 1714  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1715 {
 1716  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
 1717  workloadFactory, memoryManager, tensorHandleFactory);
 1718 }
 1719 
 1721  armnn::IWorkloadFactory& workloadFactory,
 1723  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1724 {
 1725  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
 1726  workloadFactory, memoryManager, tensorHandleFactory);
 1727 }
 1728 
 1730  armnn::IWorkloadFactory& workloadFactory,
 1732  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1733 {
 1734  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
 1735  workloadFactory, memoryManager, tensorHandleFactory);
 1736 }
 1737 
 1739  armnn::IWorkloadFactory& workloadFactory,
 1741  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1742 {
 1743  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
 1744  workloadFactory, memoryManager, tensorHandleFactory);
 1745 }
 1746 
 1748  armnn::IWorkloadFactory& workloadFactory,
 1750  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1751 {
 1752  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
 1753  workloadFactory, memoryManager, tensorHandleFactory);
 1754 }
 1755 
 1757  armnn::IWorkloadFactory& workloadFactory,
 1759  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1760 {
 1761  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
 1762  workloadFactory, memoryManager, tensorHandleFactory);
 1763 }
 1765  armnn::IWorkloadFactory& workloadFactory,
 1767  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1768 {
 1769  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
 1770  workloadFactory, memoryManager, tensorHandleFactory);
 1771 }
 1772 
 1774  armnn::IWorkloadFactory& workloadFactory,
 1776  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1777 {
 1778  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
 1779  workloadFactory, memoryManager, tensorHandleFactory);
 1780 }
 1781 
 1783  armnn::IWorkloadFactory& workloadFactory,
 1785  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1786 {
 1787  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
 1788  workloadFactory, memoryManager, tensorHandleFactory);
 1789 }
 1790 
 1792  armnn::IWorkloadFactory& workloadFactory,
 1794  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1795 {
 1796  return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
 1797  workloadFactory, memoryManager, tensorHandleFactory);
 1798 }
 1799 
 1801  armnn::IWorkloadFactory& workloadFactory,
 1803  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1804 {
 1805  return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
 1806  workloadFactory, memoryManager, tensorHandleFactory);
 1807 }
 1808 
 1810  armnn::IWorkloadFactory& workloadFactory,
 1812  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1813 {
 1814  return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
 1815  workloadFactory, memoryManager, tensorHandleFactory);
 1816 }
 1817 
 1819  armnn::IWorkloadFactory& workloadFactory,
 1821  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1822 {
 1823  return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
 1824  workloadFactory, memoryManager, tensorHandleFactory);
 1825 }
 1826 
 1828  armnn::IWorkloadFactory& workloadFactory,
 1830  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1831 {
 1832  return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
 1833  workloadFactory, memoryManager, tensorHandleFactory);
 1834 }
 1835 
 1837  armnn::IWorkloadFactory& workloadFactory,
 1839  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1840 {
 1841  return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
 1842  workloadFactory, memoryManager, tensorHandleFactory);
 1843 }
 1845  armnn::IWorkloadFactory& workloadFactory,
 1847  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1848 {
 1849  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
 1850  workloadFactory, memoryManager, tensorHandleFactory);
 1851 }
 1852 
 1854  armnn::IWorkloadFactory& workloadFactory,
 1856  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1857 {
 1858  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1859  workloadFactory, memoryManager, tensorHandleFactory);
 1860 }
 1861 
 1863  armnn::IWorkloadFactory& workloadFactory,
 1865  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1866 {
 1867  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
 1868  workloadFactory, memoryManager, tensorHandleFactory);
 1869 }
 1870 
 1872  armnn::IWorkloadFactory& workloadFactory,
 1874  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1875 {
 1876  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
 1877  workloadFactory, memoryManager, tensorHandleFactory);
 1878 }
 1879 
 1881  armnn::IWorkloadFactory& workloadFactory,
 1883  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1884 {
 1885  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
 1886  workloadFactory, memoryManager, tensorHandleFactory);
 1887 }
 1888 
 1890  armnn::IWorkloadFactory& workloadFactory,
 1892  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1893 {
 1894  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
 1895  workloadFactory, memoryManager, tensorHandleFactory);
 1896 }
1897 
 // Public entry points for the asymmetric-non-square pooling tests and for the
 // ComparePooling2d tests (the latter run the same pooling on two workload
 // factories -- typically a backend under test and a reference backend -- and
 // compare results; poolingType selects Max/Average/L2).
 // NOTE(review): each function's leading line (return type and name) was a
 // hyperlink in the doxygen listing and is missing from this extraction;
 // AsymmetricNonSquarePooling2dTestCommon and ComparePooling2dTestCommon are
 // not fully visible in this chunk -- confirm against the original source.
 1899  armnn::IWorkloadFactory& workloadFactory,
 1901  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1902 {
 1903  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
 1904  workloadFactory, memoryManager, tensorHandleFactory);
 1905 }
 1906 
 1908  armnn::IWorkloadFactory& workloadFactory,
 1910  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1911 {
 1912  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1913  workloadFactory, memoryManager, tensorHandleFactory);
 1914 }
 1915 
 1917  armnn::IWorkloadFactory& workloadFactory,
 1919  const armnn::ITensorHandleFactory& tensorHandleFactory)
 1920 {
 1921  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
 1922  workloadFactory, memoryManager, tensorHandleFactory);
 1923 }
 1924 
 1926  armnn::IWorkloadFactory& workloadFactory,
 1928  armnn::IWorkloadFactory& refWorkloadFactory,
 1929  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1930  const armnn::ITensorHandleFactory& refTensorHandleFactory,
 1931  armnn::PoolingAlgorithm poolingType)
 1932 {
 1933  return ComparePooling2dTestCommon<armnn::DataType::Float32>(
 1934  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
 1935 }
 1936 
 1938  armnn::IWorkloadFactory& workloadFactory,
 1940  armnn::IWorkloadFactory& refWorkloadFactory,
 1941  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1942  const armnn::ITensorHandleFactory& refTensorHandleFactory,
 1943  armnn::PoolingAlgorithm poolingType)
 1944 {
 1945  return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
 1946  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
 1947  poolingType, 0.1f, 128);
 1948 }
 1949 
 1951  armnn::IWorkloadFactory& workloadFactory,
 1953  armnn::IWorkloadFactory& refWorkloadFactory,
 1954  const armnn::ITensorHandleFactory& tensorHandleFactory,
 1955  const armnn::ITensorHandleFactory& refTensorHandleFactory,
 1956  armnn::PoolingAlgorithm poolingType)
 1957 {
 1958  return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
 1959  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
 1960 }
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all dimension sizes which are specified...
Definition: Tensor.cpp:181
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout
Definition: Types.hpp:49
unsigned int GetWidthIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
PoolingAlgorithm
Definition: Types.hpp:123
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetHeightIndex() const
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:475
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields count, but are ignored.
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:491
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
unsigned int GetChannelsIndex() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)