// ArmNN 22.05 — Pooling2dTestImpl.cpp (source listing reconstructed from the Doxygen documentation page)
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Pooling2dTestImpl.hpp"
7 
9 #include <ResolveType.hpp>
10 
13 #include <armnnUtils/Permute.hpp>
14 
17 
18 #include <armnn/BackendHelper.hpp>
20 
23 
25 
26 namespace
27 {
28 
29 using namespace armnnUtils;
30 
31 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
32 LayerTestResult<T, 4> SimplePooling2dTestImpl(
33  armnn::IWorkloadFactory& workloadFactory,
35  const armnn::ITensorHandleFactory& tensorHandleFactory,
36  armnn::Pooling2dDescriptor descriptor,
37  float qScale,
38  int32_t qOffset,
39  const std::vector<T>& input,
40  const std::vector<T>& outputExpected,
41  const armnn::TensorShape& inputShape,
42  const armnn::TensorShape& outputShape)
43 {
44  IgnoreUnused(memoryManager);
45  const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
46  const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
47  auto heightIndex = dimensionIndices.GetHeightIndex();
48  auto widthIndex = dimensionIndices.GetWidthIndex();
49  auto channelsIndex = dimensionIndices.GetChannelsIndex();
50 
51  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(inputShape[heightIndex]);
52  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(inputShape[widthIndex]);
53  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(inputShape[channelsIndex]);
54  unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
55 
56  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputShape[heightIndex]);
57  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputShape[widthIndex]);
58  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputShape[channelsIndex]);
59  unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputShape[0]);
60 
62  inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
63 
65  outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
66 
67  // Set quantization parameters if the requested type is a quantized type.
68  if(armnn::IsQuantizedType<T>())
69  {
70  inputTensorInfo.SetQuantizationScale(qScale);
71  inputTensorInfo.SetQuantizationOffset(qOffset);
72  outputTensorInfo.SetQuantizationScale(qScale);
73  outputTensorInfo.SetQuantizationOffset(qOffset);
74  }
75 
76  LayerTestResult<T, 4> result(outputTensorInfo);
77  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
78 
79  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
80  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
81 
82  armnn::Pooling2dQueueDescriptor queueDescriptor;
83  queueDescriptor.m_Parameters = descriptor;
84  queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
85 
86  armnn::WorkloadInfo workloadInfo;
87  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
88  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
89 
90  // Don't execute if Pooling is not supported, as an exception will be raised.
91  armnn::BackendId backend = workloadFactory.GetBackendId();
92 
93  auto handle = armnn::GetILayerSupportByBackendId(backend);
94  result.m_Supported = handle.IsPooling2dSupported(inputTensorInfo,
95  outputTensorInfo,
96  queueDescriptor.m_Parameters);
97  if (!result.m_Supported)
98  {
99  return result;
100  }
101 
102  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
103  queueDescriptor,
104  workloadInfo);
105 
106  inputHandle->Allocate();
107  outputHandle->Allocate();
108 
109  CopyDataToITensorHandle(inputHandle.get(), input.data());
110 
111  workload->Execute();
112 
113  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
114 
115  result.m_ActualData = actualOutput;
116  result.m_ExpectedData = outputExpected;
117 
118  return result;
119 }
120 
121 //
122 // Tests max pooling with the following parameters:
123 //
124 // Pooling size: 3x3
125 // Stride: (2,4)
126 // input size: 8x13
127 // channels: 2
128 // batch size: 2
129 //
130 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
131 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
132  armnn::IWorkloadFactory& workloadFactory,
134  const armnn::ITensorHandleFactory& tensorHandleFactory,
135  bool forceNoPadding,
136  float qScale = 1.0f,
137  int32_t qOffset = 0)
138 {
139  armnn::Pooling2dDescriptor descriptor;
141  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
142  descriptor.m_StrideX = 2;
143  descriptor.m_StrideY = 4;
144  // forceNoPadding is mainly used for compatibility with ARM Compute.
145  // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
146  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
147  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
150 
151  unsigned int inputWidth = 8;
152  unsigned int inputHeight = 13;
153  unsigned int outputWidth =
154  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
155  descriptor.m_StrideX;
156  unsigned int outputHeight =
157  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
158  descriptor.m_StrideY;
159  unsigned int channels = 2;
160  unsigned int batchSize = 2;
161 
162  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
163  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
164 
165  // Set quantization parameters if the requested type is a quantized type.
166  if(armnn::IsQuantizedType<T>())
167  {
168  inputTensorInfo.SetQuantizationScale(qScale);
169  inputTensorInfo.SetQuantizationOffset(qOffset);
170  outputTensorInfo.SetQuantizationScale(qScale);
171  outputTensorInfo.SetQuantizationOffset(qOffset);
172  }
173 
174  std::vector<float> singleChannelData({
175  0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
176  1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
177  8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
178  8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
179  5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
180  1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
181  9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
182  1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
183  6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
184  8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
185  7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
186  4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
187  3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
188  });
189 
190  // Constructs input data.
191  std::vector<float> inputData;
192  auto negator = [](float f) { return -f; };
193 
194  // First image (two channels where the second channel is the negative of the first one).
195  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
196  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
197 
198  // Second image (same as first image).
199  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
200  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
201 
202  auto input = QuantizedVector<T>(inputData, qScale, qOffset);
203 
204  // These were calculated manually.
205  std::vector<T> outputExpected;
206  if (forceNoPadding)
207  {
208  outputExpected = QuantizedVector<T>(
209  {
210  8.0f, 8.0f, 8.0f,
211  9.0f, 7.0f, 9.0f,
212  9.0f, 9.0f, 9.0f,
213 
214  0.0f, 0.0f, -3.0f,
215  -1.0f, 0.0f, 0.0f,
216  -1.0f, -1.0f, -1.0f,
217 
218  8.0f, 8.0f, 8.0f,
219  9.0f, 7.0f, 9.0f,
220  9.0f, 9.0f, 9.0f,
221 
222  0.0f, 0.0f, -3.0f,
223  -1.0f, 0.0f, 0.0f,
224  -1.0f, -1.0f, -1.0f
225  },
226  qScale, qOffset);
227  }
228  else
229  {
230  outputExpected = QuantizedVector<T>(
231  {
232  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
233  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
234  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
235 
236  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
237  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
238  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
239 
240  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
241  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
242  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
243 
244  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
245  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
246  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
247  },
248  qScale, qOffset);
249  }
250 
251  return SimplePooling2dTestImpl<ArmnnType>(
252  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
253  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
254 }
255 
256 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
257 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
258  armnn::IWorkloadFactory& workloadFactory,
260  const armnn::ITensorHandleFactory& tensorHandleFactory,
261  const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
262  float qScale = 1.0f,
263  int32_t qOffset = 0)
264 {
265  armnn::Pooling2dDescriptor descriptor;
267  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
268  descriptor.m_StrideX = descriptor.m_StrideY = 2;
270  descriptor.m_DataLayout = dataLayout;
271 
272  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
273  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
274 
275  // Set quantization parameters if the requested type is a quantized type.
276  if(armnn::IsQuantizedType<T>())
277  {
278  inputTensorInfo.SetQuantizationScale(qScale);
279  inputTensorInfo.SetQuantizationOffset(qOffset);
280  outputTensorInfo.SetQuantizationScale(qScale);
281  outputTensorInfo.SetQuantizationOffset(qOffset);
282  }
283 
284  std::vector<T> inputData(
285  QuantizedVector<T>({
286  1.0f, 2.0f, 5.0f, 6.0f,
287  3.0f, 4.0f, 7.0f, 8.0f,
288  9.0f, 10.0f, 13.0f, 14.0f,
289  11.0f, 12.0f, 15.0f, 16.0f,
290 
291  17.0f, 18.0f, 21.0f, 22.0f,
292  19.0f, 20.0f, 23.0f, 24.0f,
293  25.0f, 26.0f, 29.0f, 30.0f,
294  27.0f, 28.0f, 31.0f, 32.0f,
295  },
296  qScale, qOffset));
297 
298  std::vector<T> outputData(
299  QuantizedVector<T>({
300  4.0f, 8.0f,
301  12.0f, 16.0f,
302 
303  20.0f, 24.0f,
304  28.0f, 32.0f,
305  },
306  qScale, qOffset));
307 
308  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
309  if (dataLayout == armnn::DataLayout::NHWC)
310  {
311  std::vector<T> tmp(inputData.size());
312  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
313  inputData = tmp;
314 
315  std::vector<T> tmp1(outputData.size());
316  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
317  outputData = tmp1;
318  }
319 
320  return SimplePooling2dTestImpl<ArmnnType>(
321  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
322  inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
323 }
324 
325 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
326 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
327  armnn::IWorkloadFactory& workloadFactory,
329  const armnn::ITensorHandleFactory& tensorHandleFactory,
331  float qScale = 1.0f,
332  int32_t qOffset = 0)
333 {
334  armnn::Pooling2dDescriptor descriptor;
336  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
337  descriptor.m_StrideX = descriptor.m_StrideY = 2;
339  descriptor.m_DataLayout = dataLayout;
340 
341  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
342  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
343 
344  // Set quantization parameters if the requested type is a quantized type.
345  if(armnn::IsQuantizedType<T>())
346  {
347  inputTensorInfo.SetQuantizationScale(qScale);
348  inputTensorInfo.SetQuantizationOffset(qOffset);
349  outputTensorInfo.SetQuantizationScale(qScale);
350  outputTensorInfo.SetQuantizationOffset(qOffset);
351  }
352 
353  std::vector<T> inputData(
354  QuantizedVector<T>({
355  2.0f, 2.0f, 6.0f, 6.0f,
356  4.0f, 4.0f, 8.0f, 8.0f,
357  10.0f, 12.0f, 14.0f, 16.0f,
358  10.0f, 12.0f, 16.0f, 14.0f,
359 
360  18.0f, 20.0f, 24.0f, 22.0f,
361  20.0f, 18.0f, 22.0f, 24.0f,
362  26.0f, 28.0f, 0.0f, 0.0f,
363  26.0f, 28.0f, 0.0f, 0.0f,
364  },
365  qScale, qOffset));
366 
367  std::vector<T> outputData(
368  QuantizedVector<T>({
369  3.0f, 7.0f,
370  11.0f, 15.0f,
371 
372  19.0f, 23.0f,
373  27.0f, 0.0f,
374  },
375  qScale, qOffset));
376 
377  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
378  if (dataLayout == armnn::DataLayout::NHWC)
379  {
380  std::vector<T> tmp(inputData.size());
381  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
382  inputData = tmp;
383 
384  std::vector<T> tmp1(outputData.size());
385  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
386  outputData = tmp1;
387  }
388 
389  return SimplePooling2dTestImpl<ArmnnType>(
390  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
391  inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
392 }
393 
394 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
395 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
396  armnn::IWorkloadFactory& workloadFactory,
398  const armnn::ITensorHandleFactory& tensorHandleFactory,
399  float qScale = 1.0f,
400  int32_t qOffset = 0)
401 {
402  armnn::Pooling2dDescriptor descriptor;
404  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
405  descriptor.m_StrideX = descriptor.m_StrideY = 5;
406  descriptor.m_PadLeft = 50;
407  descriptor.m_PadRight = 50;
408  descriptor.m_PadTop = 50;
409  descriptor.m_PadBottom = 50;
411 
412  armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
413  armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
414 
415  // Set quantization parameters if the requested type is a quantized type.
416  if(armnn::IsQuantizedType<T>())
417  {
418  inputTensorInfo.SetQuantizationScale(qScale);
419  inputTensorInfo.SetQuantizationOffset(qOffset);
420  outputTensorInfo.SetQuantizationScale(qScale);
421  outputTensorInfo.SetQuantizationOffset(qOffset);
422  }
423 
424  std::vector<T> input;
425 
426  for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
427  {
428  input.push_back(1);
429  }
430 
431  std::vector<T> outputExpected;
432 
433  for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
434  {
435  outputExpected.push_back(1);
436  }
437 
438  return SimplePooling2dTestImpl<ArmnnType>(
439  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
440  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
441 }
442 
443 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
444 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
445  armnn::IWorkloadFactory& workloadFactory,
447  const armnn::ITensorHandleFactory& tensorHandleFactory,
449  float qScale = 1.0f,
450  int32_t qOffset = 0)
451 {
452  armnn::Pooling2dDescriptor descriptor;
454  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
455  descriptor.m_StrideX = descriptor.m_StrideY = 2;
457  descriptor.m_DataLayout = dataLayout;
458 
459  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
460  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
461 
462  std::vector<T> inputData(
463  QuantizedVector<T>({
464  1.0f, 7.0f, 5.0f, 5.0f,
465  1.0f, 7.0f, 5.0f, 5.0f,
466  3.0f, 3.0f, 1.0f, 1.0f,
467  3.0f, 3.0f, 1.0f, 1.0f,
468 
469  1.0f, 7.0f, 0.0f, 0.0f,
470  1.0f, 7.0f, 2.0f, 0.0f,
471  0.0f, 2.0f, 1.0f, 1.0f,
472  0.0f, 0.0f, 1.0f, 1.0f,
473  },
474  qScale, qOffset));
475 
476  std::vector<T> outputData(
477  QuantizedVector<T>({
478  5.0f, 5.0f,
479  3.0f, 1.0f,
480 
481  5.0f, 1.0f,
482  1.0f, 1.0f,
483  },
484  qScale, qOffset));
485 
486  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
487  if (dataLayout == armnn::DataLayout::NHWC)
488  {
489  std::vector<T> tmp(inputData.size());
490  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
491  inputData = tmp;
492 
493  std::vector<T> tmp1(outputData.size());
494  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
495  outputData = tmp1;
496  }
497 
498  return SimplePooling2dTestImpl<ArmnnType>(
499  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
500  inputData, outputData, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
501 }
502 
503 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
504 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
505  armnn::IWorkloadFactory& workloadFactory,
507  const armnn::ITensorHandleFactory& tensorHandleFactory,
508  float qScale = 1.0f,
509  int32_t qOffset = 0)
510 {
511  armnn::Pooling2dDescriptor descriptor;
513  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
514  descriptor.m_StrideX = descriptor.m_StrideY = 1;
516 
517  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
518  auto input = QuantizedVector<T>(
519  {
520  2.0f, 1.0f, 5.0f, 2.0f,
521  1.0f, 2.0f, 2.0f, 1.0f,
522  5.0f, 4.0f, 1.0f, 5.0f,
523  2.0f, 1.0f, 5.0f, 2.0f,
524  },
525  qScale, qOffset);
526 
527  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
528  auto outputExpected = QuantizedVector<T>(
529  {
530  3.0f, 3.0f,
531  3.0f, 3.0f,
532  },
533  qScale, qOffset);
534 
535  return SimplePooling2dTestImpl<ArmnnType>(
536  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
537  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
538 }
539 
540 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
541 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
542  armnn::IWorkloadFactory& workloadFactory,
544  const armnn::ITensorHandleFactory& tensorHandleFactory,
545  float qScale = 1.0f,
546  int32_t qOffset = 0)
547 {
548  armnn::Pooling2dDescriptor descriptor;
550  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
551  descriptor.m_StrideX = descriptor.m_StrideY = 3;
553 
554  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
555  auto input = QuantizedVector<T>(
556  {
557  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
558  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
559  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
560  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
561  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
562  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
563  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
564  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
565  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
566  },
567  qScale, qOffset);
568 
569  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
570  auto outputExpected = QuantizedVector<T>(
571  {
572  3.0f, 3.0f, 3.0f,
573  3.0f, 3.0f, 3.0f,
574  3.0f, 3.0f, 3.0f,
575  },
576  qScale, qOffset);
577 
578  return SimplePooling2dTestImpl<ArmnnType>(
579  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
580  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
581 }
582 
583 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
584 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
585  armnn::IWorkloadFactory& workloadFactory,
587  const armnn::ITensorHandleFactory& tensorHandleFactory,
588  float qScale = 1.0f,
589  int32_t qOffset = 0)
590 {
591  armnn::Pooling2dDescriptor descriptor;
593  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
594  descriptor.m_StrideX = descriptor.m_StrideY = 4;
596 
597  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
598  auto input = QuantizedVector<T>(
599  {
600  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
601  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
602  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
603  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
604  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
605  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
606  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
607  },
608  qScale, qOffset);
609 
610  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
611  auto outputExpected = QuantizedVector<T>(
612  {
613  3.0f, 3.0f,
614  3.0f, 3.0f,
615  },
616  qScale, qOffset);
617 
618  return SimplePooling2dTestImpl<ArmnnType>(
619  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
620  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
621 }
622 
623 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
624 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
625  armnn::IWorkloadFactory& workloadFactory,
627  const armnn::ITensorHandleFactory& tensorHandleFactory,
628  float qScale = 1.0f,
629  int32_t qOffset = 0)
630 {
631  armnn::Pooling2dDescriptor descriptor;
633  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
634  descriptor.m_StrideX = descriptor.m_StrideY = 7;
636 
637  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
638  auto input = QuantizedVector<T>(
639  {
640  1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
641  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
642  0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
643  8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
644  0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
645  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
647  },
648  qScale, qOffset);
649 
650  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
651  auto outputExpected = QuantizedVector<T>(
652  {
653  3.0f,
654  },
655  qScale, qOffset);
656 
657  return SimplePooling2dTestImpl<ArmnnType>(
658  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
659  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
660 }
661 
662 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
663 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
664  armnn::IWorkloadFactory& workloadFactory,
666  const armnn::ITensorHandleFactory& tensorHandleFactory,
667  float qScale = 1.0f,
668  int32_t qOffset = 0)
669 {
670  armnn::Pooling2dDescriptor descriptor;
672  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
673  descriptor.m_StrideX = descriptor.m_StrideY = 9;
675 
676  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
677  auto input = QuantizedVector<T>(
678  {
679  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
680  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
681  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
682  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
683  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
684  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
685  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
686  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
687  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
688  },
689  qScale, qOffset);
690 
691  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
692  auto outputExpected = QuantizedVector<T>(
693  {
694  3.0f,
695  },
696  qScale, qOffset);
697 
698  return SimplePooling2dTestImpl<ArmnnType>(
699  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
700  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
701 }
702 
703 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
704 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
705  armnn::IWorkloadFactory& workloadFactory,
707  const armnn::ITensorHandleFactory& tensorHandleFactory,
708  float qScale = 1.0f,
709  int32_t qOffset = 0)
710 {
711  armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
712  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
713 
714  armnn::Pooling2dDescriptor descriptor;
716  descriptor.m_PoolWidth = 2;
717  descriptor.m_PoolHeight = 3;
718  descriptor.m_StrideX = 2;
719  descriptor.m_StrideY = 1;
720  descriptor.m_PadLeft = 2;
721  descriptor.m_PadRight = 0;
722  descriptor.m_PadTop = 1;
723  descriptor.m_PadBottom = 2;
724  descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
725  descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
726 
727  // Construct input data.
728  auto input = QuantizedVector<T>(
729  {
730  1.0f, 3.0f, 4.0f,
731  },
732  qScale, qOffset);
733 
734  // These were calculated manually.
735  auto outputExpected = QuantizedVector<T>(
736  {
737  0.0f, 3.0f, 0.0f, 3.0f,
738  },
739  qScale, qOffset);
740 
741  return SimplePooling2dTestImpl<ArmnnType>(
742  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
743  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
744 }
745 
746 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
747 LayerTestResult<T, 4> ComparePooling2dTestCommon(
748  armnn::IWorkloadFactory& workloadFactory,
750  armnn::IWorkloadFactory& refWorkloadFactory,
751  const armnn::ITensorHandleFactory& tensorHandleFactory,
752  const armnn::ITensorHandleFactory& refTensorHandleFactory,
753  armnn::PoolingAlgorithm poolingType,
754  float qScale = 1.0f,
755  int32_t qOffset = 0)
756 {
757  IgnoreUnused(memoryManager);
758  const unsigned int inputWidth = 16;
759  const unsigned int inputHeight = 32;
760  const unsigned int channelCount = 2;
761  const unsigned int batchSize = 5;
762 
763  const unsigned int poolSize = 3;
764  const unsigned int strideX = 2;
765  const unsigned int strideY = 4;
766  const unsigned int padX = 0;
767  const unsigned int padY = 0;
768 
769  const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
770  const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
771 
772  armnn::TensorInfo inputTensorInfo;
773  armnn::TensorInfo outputTensorInfo;
774 
775  unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
776  unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
777 
778  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
779  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
780 
781  // Set quantization parameters if the requested type is a quantized type.
782  if(armnn::IsQuantizedType<T>())
783  {
784  inputTensorInfo.SetQuantizationScale(qScale);
785  inputTensorInfo.SetQuantizationOffset(qOffset);
786  outputTensorInfo.SetQuantizationScale(qScale);
787  outputTensorInfo.SetQuantizationOffset(qOffset);
788  }
789 
790  std::vector<T> input = MakeRandomTensor<T>(inputTensorInfo, 81715);
791  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
792  std::vector<T> expectedOutput(outputTensorInfo.GetNumElements());
793 
794  LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
795 
796  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
797  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
798 
801  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
802  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
803  data.m_Parameters.m_PoolType = poolingType;
804  data.m_Parameters.m_PoolWidth = poolSize;
805  data.m_Parameters.m_PoolHeight = poolSize;
806  data.m_Parameters.m_StrideX = strideX;
807  data.m_Parameters.m_StrideY = strideY;
808  data.m_Parameters.m_PadLeft = padX;
809  data.m_Parameters.m_PadRight = padX;
810  data.m_Parameters.m_PadTop = padY;
811  data.m_Parameters.m_PadBottom = padY;
813 
814  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
815  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
816 
817  // Don't execute if Pooling is not supported, as an exception will be raised.
818  armnn::BackendId backend = workloadFactory.GetBackendId();
819 
820  auto handle = armnn::GetILayerSupportByBackendId(backend);
821  comparisonResult.m_Supported = handle.IsPooling2dSupported(inputTensorInfo,
822  outputTensorInfo,
823  data.m_Parameters);
824  if (!comparisonResult.m_Supported)
825  {
826  return comparisonResult;
827  }
828 
829  armnn::Pooling2dQueueDescriptor refData = data;
830  armnn::WorkloadInfo refInfo = info;
831  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
832  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
833 
834  std::unique_ptr<armnn::IWorkload> workload
835  = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d, data, info);
836  std::unique_ptr<armnn::IWorkload> workloadRef
837  = refWorkloadFactory.CreateWorkload(armnn::LayerType::Pooling2d, refData, refInfo);
838 
839  outputHandleRef->Allocate();
840  inputHandleRef->Allocate();
841  inputHandle->Allocate();
842  outputHandle->Allocate();
843 
844  CopyDataToITensorHandle(inputHandle.get(), input.data());
845  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
846 
847  workload->Execute();
848  workloadRef->Execute();
849 
850  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
851  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
852 
853  comparisonResult.m_ActualData = actualOutput;
854  comparisonResult.m_ExpectedData = expectedOutput;
855 
856  return comparisonResult;
857 }
858 
859 //
860 // Tests max pooling with the following parameters:
861 //
862 // Pooling size: 2x2
863 // Stride: (2,2)
864 // input size: 4x4
865 // channels: 1
866 // batch size: 1
867 //
868 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
869 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
870  armnn::IWorkloadFactory& workloadFactory,
872  const armnn::ITensorHandleFactory& tensorHandleFactory,
873  bool forceNoPadding,
874  float qScale = 1.0f,
875  int32_t qOffset = 0)
876 {
877  armnn::Pooling2dDescriptor descriptor;
879  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
880  descriptor.m_StrideX = 2;
881  descriptor.m_StrideY = 2;
882  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
883  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
886 
887 
888  unsigned int inputWidth = 4;
889 
890  unsigned int inputHeight = 4;
891 
892  unsigned int outputWidth =
893  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
894  descriptor.m_StrideX;
895  unsigned int outputHeight =
896  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
897  descriptor.m_StrideY;
898  unsigned int channels = 1;
899  unsigned int batchSize = 1;
900 
901  std::vector<float> inputData = {
902  510.0f, 222.0f, 780.0f, 654.0f,
903  141.0f, 276.0f, 15.0f, 546.0f,
904  303.0f, 618.0f, 582.0f, 339.0f,
905  438.0f, 564.0f, 573.0f, 402.0f
906  };
907 
908  // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
909  std::vector<float> expectedOutputDataWithPadding = {
910  0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
911  0.0f, 438.0f, 618.0f, 402.0f, 0.0f
912  };
913 
914  std::vector<float> expectedOutputDataNoPadding = {
915  510.0f, 780.0f,
916  618.0f, 582.0f
917  };
918 
919  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
920 
921  // Scale and offset should match input - we're just calculating maximum values.
922  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
923 
924  // Set quantization parameters if the requested type is a quantized type.
925  if(armnn::IsQuantizedType<T>())
926  {
927  inputTensorInfo.SetQuantizationScale(qScale);
928  inputTensorInfo.SetQuantizationOffset(qOffset);
929  outputTensorInfo.SetQuantizationScale(qScale);
930  outputTensorInfo.SetQuantizationOffset(qOffset);
931  }
932 
933  auto input = QuantizedVector<T>(inputData, qScale, qOffset);
934 
935  auto outputExpected =
936  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
937  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
938 
939  return SimplePooling2dTestImpl<ArmnnType>(
940  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
941  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
942 }
943 
944 //
945 // Tests max pooling with the following parameters:
946 //
947 // Pooling size: 3x2
948 // Stride: (2,2)
949 // input size: 3x2
950 // channels: 1
951 // batch size: 1
952 //
953 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
954 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
955  armnn::IWorkloadFactory& workloadFactory,
957  const armnn::ITensorHandleFactory& tensorHandleFactory,
958  bool forceNoPadding,
959  float qScale = 1.0f,
960  int32_t qOffset = 0)
961 {
962  armnn::Pooling2dDescriptor descriptor;
964  descriptor.m_PoolWidth = 3;
965  descriptor.m_PoolHeight = 2;
966  descriptor.m_StrideX = 2;
967  descriptor.m_StrideY = 2;
968  descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
969  descriptor.m_PadRight = descriptor.m_PadLeft;
970  descriptor.m_PadTop = 0;
971  descriptor.m_PadBottom = 0;
974 
975  unsigned int inputWidth = 3;
976  unsigned int inputHeight = 2;
977  unsigned int outputWidth =
978  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
979  descriptor.m_StrideX;
980  unsigned int outputHeight =
981  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
982  descriptor.m_StrideY;
983  unsigned int channels = 1;
984  unsigned int batchSize = 1;
985 
986  std::vector<float> inputData = {
987  3.0f, 6.0f, 9.0f,
988  12.0f, 15.0f, 18.0f,
989  };
990 
991  std::vector<float> expectedOutputDataWithPadding = {
992  6.0f, 8.0f,
993  };
994 
995  std::vector<float> expectedOutputDataNoPadding = {
996  10.5f,
997  };
998 
999  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
1000 
1001  // Scale and offset should match input - we're just calculating average values.
1002  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
1003 
1004  // Set quantization parameters if the requested type is a quantized type.
1005  if(armnn::IsQuantizedType<T>())
1006  {
1007  inputTensorInfo.SetQuantizationScale(qScale);
1008  inputTensorInfo.SetQuantizationOffset(qOffset);
1009  outputTensorInfo.SetQuantizationScale(qScale);
1010  outputTensorInfo.SetQuantizationOffset(qOffset);
1011  }
1012 
1013  auto input = QuantizedVector<T>(inputData, qScale, qOffset);
1014 
1015  auto outputExpected =
1016  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1017  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset);
1018 
1019  return SimplePooling2dTestImpl<ArmnnType>(
1020  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1021  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1022 }
1023 
1024 
1025 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1026 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1027  armnn::IWorkloadFactory& workloadFactory,
1029  const armnn::ITensorHandleFactory& tensorHandleFactory,
1030  float qScale = 1.0f,
1031  int32_t qOffset = 0)
1032 {
1033  armnn::Pooling2dDescriptor descriptor;
1035  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1036  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1037  descriptor.m_PadLeft = 1;
1038  descriptor.m_PadRight = 1;
1039  descriptor.m_PadTop = 1;
1040  descriptor.m_PadBottom = 1;
1042 
1043  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1044  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1045 
1046  // Set quantization parameters if the requested type is a quantized type.
1047  if(armnn::IsQuantizedType<T>())
1048  {
1049  inputTensorInfo.SetQuantizationScale(qScale);
1050  inputTensorInfo.SetQuantizationOffset(qOffset);
1051  outputTensorInfo.SetQuantizationScale(qScale);
1052  outputTensorInfo.SetQuantizationOffset(qOffset);
1053  }
1054 
1055  auto input = QuantizedVector<T>(
1056  {
1057  -1.0f, -2.0f, 3.0f, 4.0f,
1058  -1.0f, -2.0f, 3.0f, 4.0f,
1059  1.0f, 2.0f, -3.0f, -4.0f,
1060  1.0f, 2.0f, -3.0f, -4.0f,
1061  },
1062  qScale, qOffset);
1063 
1064  auto outputExpected = QuantizedVector<T>(
1065  {
1066  -1.0f, 3.0f, 4.0f,
1067  1.0f, 3.0f, 4.0f,
1068  1.0f, 2.0f, -4.0f,
1069  },
1070  qScale, qOffset);
1071 
1072  return SimplePooling2dTestImpl<ArmnnType>(
1073  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1074  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1075 }
1076 
1077 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1078 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1079  armnn::IWorkloadFactory& workloadFactory,
1081  const armnn::ITensorHandleFactory& tensorHandleFactory,
1082  float qScale = 1.0f,
1083  int32_t qOffset = 0)
1084 {
1085  armnn::Pooling2dDescriptor descriptor;
1087  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1088  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1089  descriptor.m_PadLeft = 1;
1090  descriptor.m_PadRight = 1;
1091  descriptor.m_PadTop = 1;
1092  descriptor.m_PadBottom = 1;
1094 
1095  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1096  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1097 
1098  // Set quantization parameters if the requested type is a quantized type.
1099  if(armnn::IsQuantizedType<T>())
1100  {
1101  inputTensorInfo.SetQuantizationScale(qScale);
1102  inputTensorInfo.SetQuantizationOffset(qOffset);
1103  outputTensorInfo.SetQuantizationScale(qScale);
1104  outputTensorInfo.SetQuantizationOffset(qOffset);
1105  }
1106 
1107  auto input = QuantizedVector<T>(
1108  {
1109  -1.0f, -2.0f, 3.0f, 4.0f,
1110  -1.0f, -2.0f, 3.0f, 4.0f,
1111  1.0f, 2.0f, -3.0f, -4.0f,
1112  1.0f, 2.0f, -3.0f, -4.0f,
1113  },
1114  qScale, qOffset);
1115 
1116  auto outputExpected = QuantizedVector<T>(
1117  {
1118  -1.0f, 3.0f, 4.0f, 4.0f,
1119  2.0f, 3.0f, 4.0f, 4.0f,
1120  2.0f, 3.0f, 4.0f, 4.0f,
1121  2.0f, 2.0f, 2.0f, -3.0f,
1122  },
1123  qScale, qOffset);
1124 
1125  return SimplePooling2dTestImpl<ArmnnType>(
1126  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1127  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1128 }
1129 
1130 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1131 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1132  armnn::IWorkloadFactory& workloadFactory,
1134  const armnn::ITensorHandleFactory& tensorHandleFactory,
1135  float qScale = 1.0f,
1136  int32_t qOffset = 0)
1137 {
1138  armnn::Pooling2dDescriptor descriptor;
1140  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1141  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1142  descriptor.m_PadLeft = 1;
1143  descriptor.m_PadRight = 1;
1144  descriptor.m_PadTop = 1;
1145  descriptor.m_PadBottom = 1;
1147 
1148  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1149  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1150 
1151  // Set quantization parameters if the requested type is a quantized type.
1152  if(armnn::IsQuantizedType<T>())
1153  {
1154  inputTensorInfo.SetQuantizationScale(qScale);
1155  inputTensorInfo.SetQuantizationOffset(qOffset);
1156  outputTensorInfo.SetQuantizationScale(qScale);
1157  outputTensorInfo.SetQuantizationOffset(qOffset);
1158  }
1159 
1160  auto input = QuantizedVector<T>(
1161  {
1162  12.0f, 20.0f, 32.0f, 40.0f,
1163  12.0f, 20.0f, 32.0f, 40.0f,
1164  12.0f, 20.0f, 32.0f, 40.0f,
1165  12.0f, 20.0f, 32.0f, 40.0f,
1166  },
1167  qScale, qOffset);
1168 
1169  auto outputExpected = QuantizedVector<T>(
1170  {
1171  3.0f, 13.0f, 10.0f,
1172  6.0f, 26.0f, 20.0f,
1173  3.0f, 13.0f, 10.0f,
1174  },
1175  qScale, qOffset);
1176 
1177  return SimplePooling2dTestImpl<ArmnnType>(
1178  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1179  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1180 }
1181 
1182 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1183 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1184  armnn::IWorkloadFactory& workloadFactory,
1186  const armnn::ITensorHandleFactory& tensorHandleFactory,
1187  float qScale = 1.0f,
1188  int32_t qOffset = 0)
1189 {
1190  armnn::Pooling2dDescriptor descriptor;
1192  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1193  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1194  descriptor.m_PadLeft = 0;
1195  descriptor.m_PadRight = 0;
1196  descriptor.m_PadTop = 0;
1197  descriptor.m_PadBottom = 0;
1200 
1201  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1202  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1203 
1204  // Set quantization parameters if the requested type is a quantized type.
1205  if(armnn::IsQuantizedType<T>())
1206  {
1207  inputTensorInfo.SetQuantizationScale(qScale);
1208  inputTensorInfo.SetQuantizationOffset(qOffset);
1209  outputTensorInfo.SetQuantizationScale(qScale);
1210  outputTensorInfo.SetQuantizationOffset(qOffset);
1211  }
1212 
1213  auto input = QuantizedVector<T>(
1214  {
1215  1.0f, 2.0f, 3.0f, 4.0f,
1216  1.0f, 2.0f, 3.0f, 4.0f,
1217  1.0f, 2.0f, 3.0f, 4.0f,
1218  1.0f, 2.0f, 3.0f, 4.0f,
1219  },
1220  qScale, qOffset);
1221 
1222  auto outputExpected = QuantizedVector<T>(
1223  {
1224  2.0f, 3.5f,
1225  2.0f, 3.5f
1226  },
1227  qScale, qOffset);
1228 
1229  return SimplePooling2dTestImpl<ArmnnType>(
1230  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1231  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1232 }
1233 
1234 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1235 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1236  armnn::IWorkloadFactory& workloadFactory,
1238  const armnn::ITensorHandleFactory& tensorHandleFactory,
1239  float qScale = 1.0f,
1240  int32_t qOffset = 0)
1241 {
1242  armnn::Pooling2dDescriptor descriptor;
1244  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1245  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1246  descriptor.m_PadLeft = 1;
1247  descriptor.m_PadRight = 1;
1248  descriptor.m_PadTop = 1;
1249  descriptor.m_PadBottom = 1;
1251 
1252  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1253  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1254 
1255  // Set quantization parameters if the requested type is a quantized type.
1256  if(armnn::IsQuantizedType<T>())
1257  {
1258  inputTensorInfo.SetQuantizationScale(qScale);
1259  inputTensorInfo.SetQuantizationOffset(qOffset);
1260  outputTensorInfo.SetQuantizationScale(qScale);
1261  outputTensorInfo.SetQuantizationOffset(qOffset);
1262  }
1263 
1264  auto input = QuantizedVector<T>(
1265  {
1266  9.0f, 27.0f, 18.0f, 36.0f,
1267  18.0f, 9.0f, 18.0f, 9.0f,
1268  27.0f, 18.0f, 9.0f, 27.0f,
1269  9.0f, 27.0f, 9.0f, 18.0f,
1270  },
1271  qScale, qOffset);
1272 
1273  auto outputExpected = QuantizedVector<T>(
1274  {
1275  7.0f, 11.0f, 13.0f, 9.0f,
1276  12.0f, 17.0f, 19.0f, 13.0f,
1277  12.0f, 16.0f, 16.0f, 10.0f,
1278  9.0f, 11.0f, 12.0f, 7.0f,
1279  },
1280  qScale, qOffset);
1281 
1282  return SimplePooling2dTestImpl<ArmnnType>(
1283  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1284  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1285 }
1286 
1287 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1288 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1289  armnn::IWorkloadFactory& workloadFactory,
1291  const armnn::ITensorHandleFactory& tensorHandleFactory,
1292  float qScale = 1.0f,
1293  int32_t qOffset = 0)
1294 {
1295  armnn::Pooling2dDescriptor descriptor;
1297  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1298  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1299  descriptor.m_PadLeft = 1;
1300  descriptor.m_PadRight = 1;
1301  descriptor.m_PadTop = 1;
1302  descriptor.m_PadBottom = 1;
1304 
1305  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1306  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1307 
1308  // Set quantization parameters if the requested type is a quantized type.
1309  if(armnn::IsQuantizedType<T>())
1310  {
1311  inputTensorInfo.SetQuantizationScale(qScale);
1312  inputTensorInfo.SetQuantizationOffset(qOffset);
1313  outputTensorInfo.SetQuantizationScale(qScale);
1314  outputTensorInfo.SetQuantizationOffset(qOffset);
1315  }
1316 
1317  auto input = QuantizedVector<T>(
1318  {
1319  2.0f, 4.0f, 8.0f, 16.0f,
1320  4.0f, 2.0f, 2.0f, 4.0f,
1321  8.0f, 2.0f, 4.0f, 2.0f,
1322  16.0f, 2.0f, 2.0f, 8.0f,
1323  },
1324  qScale, qOffset);
1325 
1326  auto outputExpected = QuantizedVector<T>(
1327  {
1328  1.0f, 4.4721f, 8.0f,
1329  4.4721f, 2.6457f, 2.236f,
1330  8.0f, 1.4142f, 4.0f,
1331  },
1332  qScale, qOffset);
1333 
1334  return SimplePooling2dTestImpl<ArmnnType>(
1335  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1336  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1337 }
1338 
1339 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1340 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1341  armnn::IWorkloadFactory& workloadFactory,
1343  const armnn::ITensorHandleFactory& tensorHandleFactory,
1344  float qScale = 1.0f,
1345  int32_t qOffset = 0)
1346 {
1347  armnn::Pooling2dDescriptor descriptor;
1349  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1350  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1351  descriptor.m_PadLeft = 1;
1352  descriptor.m_PadRight = 1;
1353  descriptor.m_PadTop = 1;
1354  descriptor.m_PadBottom = 1;
1356 
1357  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1358  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1359 
1360  // Set quantization parameters if the requested type is a quantized type.
1361  if(armnn::IsQuantizedType<T>())
1362  {
1363  inputTensorInfo.SetQuantizationScale(qScale);
1364  inputTensorInfo.SetQuantizationOffset(qOffset);
1365  outputTensorInfo.SetQuantizationScale(qScale);
1366  outputTensorInfo.SetQuantizationOffset(qOffset);
1367  }
1368 
1369  auto input = QuantizedVector<T>(
1370  {
1371  1.0f, 2.0f, 3.0f, 4.0f,
1372  1.0f, 2.0f, 3.0f, 4.0f,
1373  1.0f, 2.0f, 3.0f, 4.0f,
1374  1.0f, 2.0f, 3.0f, 4.0f,
1375  },
1376  qScale, qOffset);
1377 
1378  auto outputExpected = QuantizedVector<T>(
1379  {
1380  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1381  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1382  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1383  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1384  },
1385  qScale, qOffset);
1386 
1387  return SimplePooling2dTestImpl<ArmnnType>(
1388  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset,
1389  input, outputExpected, inputTensorInfo.GetShape(), outputTensorInfo.GetShape());
1390 }
1391 
1392 } // anonymous namespace
1393 
1395  armnn::IWorkloadFactory& workloadFactory,
1397  const armnn::ITensorHandleFactory& tensorHandleFactory,
1398  bool forceNoPadding)
1399 {
1400  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1401  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1402 }
1403 
1405  armnn::IWorkloadFactory& workloadFactory,
1407  const armnn::ITensorHandleFactory& tensorHandleFactory,
1408  bool forceNoPadding)
1409 {
1410  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1411  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
1412 }
1413 
1415  armnn::IWorkloadFactory& workloadFactory,
1417  const armnn::ITensorHandleFactory& tensorHandleFactory,
1418  bool forceNoPadding)
1419 {
1420  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1421  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1422 }
1423 
1425  armnn::IWorkloadFactory& workloadFactory,
1427  const armnn::ITensorHandleFactory& tensorHandleFactory,
1428  bool forceNoPadding)
1429 {
1430  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1431  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1432 }
1433 
1435  armnn::IWorkloadFactory& workloadFactory,
1437  const armnn::ITensorHandleFactory& tensorHandleFactory,
1438  bool forceNoPadding)
1439 {
1440  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1441  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
1442 }
1443 
1445  armnn::IWorkloadFactory& workloadFactory,
1447  const armnn::ITensorHandleFactory& tensorHandleFactory,
1448  bool forceNoPadding)
1449 {
1450  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1451  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1452 }
1453 
1455  armnn::IWorkloadFactory& workloadFactory,
1457  const armnn::ITensorHandleFactory& tensorHandleFactory,
1458  const armnn::DataLayout dataLayout)
1459 {
1460  return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1461  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1462 }
1463 
1465  armnn::IWorkloadFactory& workloadFactory,
1467  const armnn::ITensorHandleFactory& tensorHandleFactory,
1468  const armnn::DataLayout dataLayout)
1469 {
1470  return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1471  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1472 }
1473 
1475  armnn::IWorkloadFactory& workloadFactory,
1477  const armnn::ITensorHandleFactory& tensorHandleFactory,
1478  const armnn::DataLayout dataLayout)
1479 {
1480  return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1481  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1482 }
1484  armnn::IWorkloadFactory& workloadFactory,
1486  const armnn::ITensorHandleFactory& tensorHandleFactory)
1487 {
1488  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1489  workloadFactory, memoryManager, tensorHandleFactory);
1490 }
1491 
1493  armnn::IWorkloadFactory& workloadFactory,
1495  const armnn::ITensorHandleFactory& tensorHandleFactory)
1496 {
1497  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1498  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1499 }
1500 
1502  armnn::IWorkloadFactory& workloadFactory,
1504  const armnn::ITensorHandleFactory& tensorHandleFactory)
1505 {
1506  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1507  workloadFactory, memoryManager, tensorHandleFactory);
1508 }
1509 
1511  armnn::IWorkloadFactory& workloadFactory,
1513  const armnn::ITensorHandleFactory& tensorHandleFactory)
1514 {
1515  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1516  workloadFactory, memoryManager, tensorHandleFactory);
1517 }
1518 
1520  armnn::IWorkloadFactory& workloadFactory,
1522  const armnn::ITensorHandleFactory& tensorHandleFactory)
1523 {
1524  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1525  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1526 }
1527 
1529  armnn::IWorkloadFactory& workloadFactory,
1531  const armnn::ITensorHandleFactory& tensorHandleFactory)
1532 {
1533  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1534  workloadFactory, memoryManager, tensorHandleFactory);
1535 }
1536 
1538  armnn::IWorkloadFactory& workloadFactory,
1540  const armnn::ITensorHandleFactory& tensorHandleFactory,
1541  const armnn::DataLayout dataLayout)
1542 {
1543  return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1544  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1545 }
1546 
1548  armnn::IWorkloadFactory& workloadFactory,
1550  const armnn::ITensorHandleFactory& tensorHandleFactory,
1551  const armnn::DataLayout dataLayout)
1552 {
1553  return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1554  workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
1555 }
1556 
1558  armnn::IWorkloadFactory& workloadFactory,
1560  const armnn::ITensorHandleFactory& tensorHandleFactory,
1561  const armnn::DataLayout dataLayout)
1562 {
1563  return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1564  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1565 }
1566 
1568  armnn::IWorkloadFactory& workloadFactory,
1570  const armnn::ITensorHandleFactory& tensorHandleFactory,
1571  bool forceNoPadding)
1572 {
1573  return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1574  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1575 }
1576 
1578  armnn::IWorkloadFactory& workloadFactory,
1580  const armnn::ITensorHandleFactory& tensorHandleFactory)
1581 {
1582  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1583  workloadFactory, memoryManager, tensorHandleFactory);
1584 }
1585 
1587  armnn::IWorkloadFactory& workloadFactory,
1589  const armnn::ITensorHandleFactory& tensorHandleFactory)
1590 {
1591  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1592  workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
1593 }
1594 
1596  armnn::IWorkloadFactory& workloadFactory,
1598  const armnn::ITensorHandleFactory& tensorHandleFactory)
1599 {
1600  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1601  workloadFactory, memoryManager, tensorHandleFactory);
1602 }
1604  armnn::IWorkloadFactory& workloadFactory,
1606  const armnn::ITensorHandleFactory& tensorHandleFactory)
1607 {
1608  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1609  workloadFactory, memoryManager, tensorHandleFactory);
1610 }
1611 
1613  armnn::IWorkloadFactory& workloadFactory,
1615  const armnn::ITensorHandleFactory& tensorHandleFactory)
1616 {
1617  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1618  workloadFactory, memoryManager, tensorHandleFactory);
1619 }
1620 
1622  armnn::IWorkloadFactory& workloadFactory,
1624  const armnn::ITensorHandleFactory& tensorHandleFactory)
1625 {
1626  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1627  workloadFactory, memoryManager, tensorHandleFactory);
1628 }
1629 
1631  armnn::IWorkloadFactory& workloadFactory,
1633  const armnn::ITensorHandleFactory& tensorHandleFactory)
1634 {
1635  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1636  workloadFactory, memoryManager, tensorHandleFactory);
1637 }
1638 
1640  armnn::IWorkloadFactory& workloadFactory,
1642  const armnn::ITensorHandleFactory& tensorHandleFactory)
1643 {
1644  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1645  workloadFactory, memoryManager, tensorHandleFactory);
1646 }
1647 
1649  armnn::IWorkloadFactory& workloadFactory,
1651  const armnn::ITensorHandleFactory& tensorHandleFactory)
1652 {
1653  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1654  workloadFactory, memoryManager, tensorHandleFactory);
1655 }
1656 
1658  armnn::IWorkloadFactory& workloadFactory,
1660  const armnn::ITensorHandleFactory& tensorHandleFactory)
1661 {
1662  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1663  workloadFactory, memoryManager, tensorHandleFactory);
1664 }
1665 
1667  armnn::IWorkloadFactory& workloadFactory,
1669  const armnn::ITensorHandleFactory& tensorHandleFactory)
1670 {
1671  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1672  workloadFactory, memoryManager, tensorHandleFactory);
1673 }
1674 
1676  armnn::IWorkloadFactory& workloadFactory,
1678  const armnn::ITensorHandleFactory& tensorHandleFactory)
1679 {
1680  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1681  workloadFactory, memoryManager, tensorHandleFactory);
1682 }
1683 
1685  armnn::IWorkloadFactory& workloadFactory,
1687  const armnn::ITensorHandleFactory& tensorHandleFactory,
1688  const armnn::DataLayout dataLayout)
1689 {
1690  return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1691  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1692 }
1693 
1695  armnn::IWorkloadFactory& workloadFactory,
1697  const armnn::ITensorHandleFactory& tensorHandleFactory,
1698  const armnn::DataLayout dataLayout)
1699 {
1700  return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1701  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1702 }
1703 
1705  armnn::IWorkloadFactory& workloadFactory,
1707  const armnn::ITensorHandleFactory& tensorHandleFactory,
1708  const armnn::DataLayout dataLayout)
1709 {
1710  return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1711  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1712 }
1713 
1715  armnn::IWorkloadFactory& workloadFactory,
1717  const armnn::ITensorHandleFactory& tensorHandleFactory)
1718 {
1719  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1720  workloadFactory, memoryManager, tensorHandleFactory);
1721 }
1722 
1724  armnn::IWorkloadFactory& workloadFactory,
1726  const armnn::ITensorHandleFactory& tensorHandleFactory)
1727 {
1728  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1729  workloadFactory, memoryManager, tensorHandleFactory);
1730 }
1731 
1733  armnn::IWorkloadFactory& workloadFactory,
1735  const armnn::ITensorHandleFactory& tensorHandleFactory)
1736 {
1737  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1738  workloadFactory, memoryManager, tensorHandleFactory);
1739 }
1740 
1742  armnn::IWorkloadFactory& workloadFactory,
1744  const armnn::ITensorHandleFactory& tensorHandleFactory)
1745 {
1746  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1747  workloadFactory, memoryManager, tensorHandleFactory);
1748 }
1749 
1751  armnn::IWorkloadFactory& workloadFactory,
1753  const armnn::ITensorHandleFactory& tensorHandleFactory)
1754 {
1755  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1756  workloadFactory, memoryManager, tensorHandleFactory);
1757 }
1758 
1760  armnn::IWorkloadFactory& workloadFactory,
1762  const armnn::ITensorHandleFactory& tensorHandleFactory)
1763 {
1764  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1765  workloadFactory, memoryManager, tensorHandleFactory);
1766 }
1768  armnn::IWorkloadFactory& workloadFactory,
1770  const armnn::ITensorHandleFactory& tensorHandleFactory)
1771 {
1772  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1773  workloadFactory, memoryManager, tensorHandleFactory);
1774 }
1775 
1777  armnn::IWorkloadFactory& workloadFactory,
1779  const armnn::ITensorHandleFactory& tensorHandleFactory)
1780 {
1781  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1782  workloadFactory, memoryManager, tensorHandleFactory);
1783 }
1784 
1786  armnn::IWorkloadFactory& workloadFactory,
1788  const armnn::ITensorHandleFactory& tensorHandleFactory)
1789 {
1790  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1791  workloadFactory, memoryManager, tensorHandleFactory);
1792 }
1793 
1795  armnn::IWorkloadFactory& workloadFactory,
1797  const armnn::ITensorHandleFactory& tensorHandleFactory)
1798 {
1799  return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1800  workloadFactory, memoryManager, tensorHandleFactory);
1801 }
1802 
1804  armnn::IWorkloadFactory& workloadFactory,
1806  const armnn::ITensorHandleFactory& tensorHandleFactory)
1807 {
1808  return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1809  workloadFactory, memoryManager, tensorHandleFactory);
1810 }
1811 
1813  armnn::IWorkloadFactory& workloadFactory,
1815  const armnn::ITensorHandleFactory& tensorHandleFactory)
1816 {
1817  return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1818  workloadFactory, memoryManager, tensorHandleFactory);
1819 }
1820 
1822  armnn::IWorkloadFactory& workloadFactory,
1824  const armnn::ITensorHandleFactory& tensorHandleFactory)
1825 {
1826  return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1827  workloadFactory, memoryManager, tensorHandleFactory);
1828 }
1829 
1831  armnn::IWorkloadFactory& workloadFactory,
1833  const armnn::ITensorHandleFactory& tensorHandleFactory)
1834 {
1835  return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1836  workloadFactory, memoryManager, tensorHandleFactory);
1837 }
1838 
1840  armnn::IWorkloadFactory& workloadFactory,
1842  const armnn::ITensorHandleFactory& tensorHandleFactory)
1843 {
1844  return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1845  workloadFactory, memoryManager, tensorHandleFactory);
1846 }
1848  armnn::IWorkloadFactory& workloadFactory,
1850  const armnn::ITensorHandleFactory& tensorHandleFactory)
1851 {
1852  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1853  workloadFactory, memoryManager, tensorHandleFactory);
1854 }
1855 
1857  armnn::IWorkloadFactory& workloadFactory,
1859  const armnn::ITensorHandleFactory& tensorHandleFactory)
1860 {
1861  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1862  workloadFactory, memoryManager, tensorHandleFactory);
1863 }
1864 
1866  armnn::IWorkloadFactory& workloadFactory,
1868  const armnn::ITensorHandleFactory& tensorHandleFactory)
1869 {
1870  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1871  workloadFactory, memoryManager, tensorHandleFactory);
1872 }
1873 
1875  armnn::IWorkloadFactory& workloadFactory,
1877  const armnn::ITensorHandleFactory& tensorHandleFactory)
1878 {
1879  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1880  workloadFactory, memoryManager, tensorHandleFactory);
1881 }
1882 
1884  armnn::IWorkloadFactory& workloadFactory,
1886  const armnn::ITensorHandleFactory& tensorHandleFactory)
1887 {
1888  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1889  workloadFactory, memoryManager, tensorHandleFactory);
1890 }
1891 
1893  armnn::IWorkloadFactory& workloadFactory,
1895  const armnn::ITensorHandleFactory& tensorHandleFactory)
1896 {
1897  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1898  workloadFactory, memoryManager, tensorHandleFactory);
1899 }
1900 
1902  armnn::IWorkloadFactory& workloadFactory,
1904  const armnn::ITensorHandleFactory& tensorHandleFactory)
1905 {
1906  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1907  workloadFactory, memoryManager, tensorHandleFactory);
1908 }
1909 
1911  armnn::IWorkloadFactory& workloadFactory,
1913  const armnn::ITensorHandleFactory& tensorHandleFactory)
1914 {
1915  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1916  workloadFactory, memoryManager, tensorHandleFactory);
1917 }
1918 
1920  armnn::IWorkloadFactory& workloadFactory,
1922  const armnn::ITensorHandleFactory& tensorHandleFactory)
1923 {
1924  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1925  workloadFactory, memoryManager, tensorHandleFactory);
1926 }
1927 
1929  armnn::IWorkloadFactory& workloadFactory,
1931  armnn::IWorkloadFactory& refWorkloadFactory,
1932  const armnn::ITensorHandleFactory& tensorHandleFactory,
1933  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1934  armnn::PoolingAlgorithm poolingType)
1935 {
1936  return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1937  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1938 }
1939 
1941  armnn::IWorkloadFactory& workloadFactory,
1943  armnn::IWorkloadFactory& refWorkloadFactory,
1944  const armnn::ITensorHandleFactory& tensorHandleFactory,
1945  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1946  armnn::PoolingAlgorithm poolingType)
1947 {
1948  return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1949  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1950  poolingType, 0.1f, 128);
1951 }
1952 
1954  armnn::IWorkloadFactory& workloadFactory,
1956  armnn::IWorkloadFactory& refWorkloadFactory,
1957  const armnn::ITensorHandleFactory& tensorHandleFactory,
1958  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1959  armnn::PoolingAlgorithm poolingType)
1960 {
1961  return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1962  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1963 }
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying the sizes of all specified dimensions.
Definition: Tensor.cpp:181
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout
Definition: Types.hpp:62
unsigned int GetWidthIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
PoolingAlgorithm
Definition: Types.hpp:136
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetHeightIndex() const
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields count, but are ignored.
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId &backend)
Convenience function to retrieve the ILayerSupportHandle for a backend.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
unsigned int GetChannelsIndex() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)