ArmNN
 21.02
Pooling2dTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Pooling2dTestImpl.hpp"
7 
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10 
11 #include <armnn/LayerSupport.hpp>
12 
15 #include <armnnUtils/Permute.hpp>
16 
19 
21 
24 
25 #include <test/TensorHelpers.hpp>
26 
27 namespace
28 {
29 
30 using namespace armnnUtils;
31 
// Shared driver for all Pooling2d layer tests: builds input/output TensorInfos
// from the supplied boost::multi_array shapes (honouring NCHW/NHWC via
// DataLayoutIndexed), checks backend support, executes a single Pooling2d
// workload, and returns the actual vs. expected outputs for the caller to compare.
// NOTE(review): this listing is a doc-extraction with gaps — the memoryManager
// parameter (orig. line 35) and the two GetTensorInfo declaration lines
// (orig. 60, 63) are missing here; confirm against the repository source.
32 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
33 LayerTestResult<T, 4> SimplePooling2dTestImpl(
34  armnn::IWorkloadFactory& workloadFactory,
36  const armnn::ITensorHandleFactory& tensorHandleFactory,
37  armnn::Pooling2dDescriptor descriptor,
38  float qScale,
39  int32_t qOffset,
40  const boost::multi_array<T, 4>& input,
41  const boost::multi_array<T, 4>& outputExpected)
42 {
43  IgnoreUnused(memoryManager);
// Resolve which dimension index is H/W/C for the descriptor's data layout,
// so the shapes below work for both NCHW and NHWC tensors.
44  const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
45  const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
46  auto heightIndex = dimensionIndices.GetHeightIndex();
47  auto widthIndex = dimensionIndices.GetWidthIndex();
48  auto channelsIndex = dimensionIndices.GetChannelsIndex();
49 
// Derive the tensor dimensions from the multi_array shapes (batch is always index 0).
50  unsigned int inputHeight = armnn::numeric_cast<unsigned int>(input.shape()[heightIndex]);
51  unsigned int inputWidth = armnn::numeric_cast<unsigned int>(input.shape()[widthIndex]);
52  unsigned int inputChannels = armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
53  unsigned int inputBatchSize = armnn::numeric_cast<unsigned int>(input.shape()[0]);
54 
55  unsigned int outputHeight = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
56  unsigned int outputWidth = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
57  unsigned int outputChannels = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
58  unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);
59 
61  inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
62 
64  outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
65 
66  // Set quantization parameters if the requested type is a quantized type.
67  if(armnn::IsQuantizedType<T>())
68  {
69  inputTensorInfo.SetQuantizationScale(qScale);
70  inputTensorInfo.SetQuantizationOffset(qOffset);
71  outputTensorInfo.SetQuantizationScale(qScale);
72  outputTensorInfo.SetQuantizationOffset(qOffset);
73  }
74 
75  LayerTestResult<T, 4> result(outputTensorInfo);
76 
77  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
78  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
79 
80  armnn::Pooling2dQueueDescriptor queueDescriptor;
81  queueDescriptor.m_Parameters = descriptor;
82  queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
83 
84  armnn::WorkloadInfo workloadInfo;
85  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
86  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
87 
88  // Don't execute if Pooling is not supported, as an exception will be raised.
89  armnn::BackendId backend = workloadFactory.GetBackendId();
90  const size_t reasonIfUnsupportedMaxLen = 255;
91  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
92  result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
93  queueDescriptor.m_Parameters,
94  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
// Early-out with supported == false so the test harness can skip rather than fail.
95  if (!result.supported)
96  {
97  return result;
98  }
99 
100  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
101 
102  inputHandle->Allocate();
103  outputHandle->Allocate();
104 
105  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
106 
107  workload->Execute();
108 
// Pull the computed result back off the (possibly device-side) tensor handle.
109  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
110 
111  result.outputExpected = outputExpected;
112 
113  return result;
114 }
115 
116 //
117 // Tests max pooling with the following parameters:
118 //
119 // Pooling size: 3x3
120 // Stride: (2,4)
121 // input size: 8x13
122 // channels: 2
123 // batch size: 2
124 //
// Max pooling, 3x3 pool, stride (2,4), 8x13 input, 2 channels, 2 batches (NCHW).
// forceNoPadding toggles between a zero-padding and a 3-wide left/right padding
// variant; the expected outputs for both variants were calculated by hand.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 128),
// the m_PoolType assignment (orig. 135) and the OutputShapeRounding/PaddingMethod
// assignments (orig. 143-144) are missing from this listing; confirm upstream.
125 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
126 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
127  armnn::IWorkloadFactory& workloadFactory,
129  const armnn::ITensorHandleFactory& tensorHandleFactory,
130  bool forceNoPadding,
131  float qScale = 1.0f,
132  int32_t qOffset = 0)
133 {
134  armnn::Pooling2dDescriptor descriptor;
136  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
137  descriptor.m_StrideX = 2;
138  descriptor.m_StrideY = 4;
139  // forceNoPadding is mainly used for compatibility with ARM Compute.
140  // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
141  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
142  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
145 
// Standard floor-rounded pooling output size: (in + padL + padR + stride - pool) / stride.
146  unsigned int inputWidth = 8;
147  unsigned int inputHeight = 13;
148  unsigned int outputWidth =
149  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
150  descriptor.m_StrideX;
151  unsigned int outputHeight =
152  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
153  descriptor.m_StrideY;
154  unsigned int channels = 2;
155  unsigned int batchSize = 2;
156 
157  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
158  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
159 
160  // Set quantization parameters if the requested type is a quantized type.
161  if(armnn::IsQuantizedType<T>())
162  {
163  inputTensorInfo.SetQuantizationScale(qScale);
164  inputTensorInfo.SetQuantizationOffset(qOffset);
165  outputTensorInfo.SetQuantizationScale(qScale);
166  outputTensorInfo.SetQuantizationOffset(qOffset);
167  }
168 
// One 13x8 channel of reference data; the second channel and second batch
// are derived from it below.
169  std::vector<float> singleChannelData({
170  0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
171  1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
172  8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
173  8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
174  5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
175  1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
176  9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
177  1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
178  6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
179  8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
180  7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
181  4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
182  3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
183  });
184 
185  // Constructs input data.
186  std::vector<float> inputData;
187  auto negator = [](float f) { return -f; };
188 
189  // First image (two channels where the second channel is the negative of the first one).
190  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
191  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
192 
193  // Second image (same as first image).
194  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
195  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
196 
197  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
198 
199  // These were calculated manually.
200  auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
201  boost::multi_array<T, 4> outputExpected(shape);
// Without padding the output is 3x3 per channel; with 3-wide left/right
// padding it widens to 3x6 (leading zero columns come from the padded region).
202  if (forceNoPadding)
203  {
204  outputExpected = MakeTensor<T, 4>(outputTensorInfo,
205  QuantizedVector<T>({
206  8.0f, 8.0f, 8.0f,
207  9.0f, 7.0f, 9.0f,
208  9.0f, 9.0f, 9.0f,
209 
210  0.0f, 0.0f, -3.0f,
211  -1.0f, 0.0f, 0.0f,
212  -1.0f, -1.0f, -1.0f,
213 
214  8.0f, 8.0f, 8.0f,
215  9.0f, 7.0f, 9.0f,
216  9.0f, 9.0f, 9.0f,
217 
218  0.0f, 0.0f, -3.0f,
219  -1.0f, 0.0f, 0.0f,
220  -1.0f, -1.0f, -1.0f
221  },
222  qScale, qOffset));
223  }
224  else
225  {
226  outputExpected = MakeTensor<T, 4>(outputTensorInfo,
227  QuantizedVector<T>({
228  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
229  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
230  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
231 
232  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
233  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
234  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
235 
236  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
237  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
238  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
239 
240  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
241  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
242  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
243  },
244  qScale, qOffset));
245  }
246 
247  return SimplePooling2dTestImpl<ArmnnType>(
248  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
249 }
250 
// Max pooling, 2x2 pool, stride 2, 1x2x4x4 input -> 1x2x2x2 output, no padding.
// Reference data is authored in NCHW and permuted when the NHWC layout is requested.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 254),
// the m_PoolType assignment (orig. 261) and the padding/PaddingMethod lines
// (orig. 264) are missing from this listing; confirm against the repository source.
251 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
252 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
253  armnn::IWorkloadFactory& workloadFactory,
255  const armnn::ITensorHandleFactory& tensorHandleFactory,
256  const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
257  float qScale = 1.0f,
258  int32_t qOffset = 0)
259 {
260  armnn::Pooling2dDescriptor descriptor;
262  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
263  descriptor.m_StrideX = descriptor.m_StrideY = 2;
265  descriptor.m_DataLayout = dataLayout;
266 
267  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
268  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
269 
270  // Set quantization parameters if the requested type is a quantized type.
271  if(armnn::IsQuantizedType<T>())
272  {
273  inputTensorInfo.SetQuantizationScale(qScale);
274  inputTensorInfo.SetQuantizationOffset(qOffset);
275  outputTensorInfo.SetQuantizationScale(qScale);
276  outputTensorInfo.SetQuantizationOffset(qOffset);
277  }
278 
// Input laid out so each 2x2 window's maximum is its bottom-right-style
// distinct value (4, 8, 12, 16, ... per window).
279  std::vector<T> inputData(
280  QuantizedVector<T>({
281  1.0f, 2.0f, 5.0f, 6.0f,
282  3.0f, 4.0f, 7.0f, 8.0f,
283  9.0f, 10.0f, 13.0f, 14.0f,
284  11.0f, 12.0f, 15.0f, 16.0f,
285 
286  17.0f, 18.0f, 21.0f, 22.0f,
287  19.0f, 20.0f, 23.0f, 24.0f,
288  25.0f, 26.0f, 29.0f, 30.0f,
289  27.0f, 28.0f, 31.0f, 32.0f,
290  },
291  qScale, qOffset));
292 
293  std::vector<T> outputData(
294  QuantizedVector<T>({
295  4.0f, 8.0f,
296  12.0f, 16.0f,
297 
298  20.0f, 24.0f,
299  28.0f, 32.0f,
300  },
301  qScale, qOffset));
302 
// Reference data above is NCHW; permute it in place when testing NHWC.
303  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
304  if (dataLayout == armnn::DataLayout::NHWC)
305  {
306  std::vector<T> tmp(inputData.size());
307  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
308  inputData = tmp;
309 
310  std::vector<T> tmp1(outputData.size());
311  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
312  outputData = tmp1;
313  }
314 
315  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
316 
317  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
318 
319  return SimplePooling2dTestImpl<ArmnnType>(
320  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
321 }
322 
// Average pooling, 2x2 pool, stride 2, 1x2x4x4 input -> 1x2x2x2 output.
// Each expected value is the mean of one 2x2 input window.
// NOTE(review): doc-extraction gaps — the memoryManager and dataLayout
// parameters (orig. 326, 328) and the m_PoolType assignment (orig. 333) are
// missing from this listing (dataLayout is used below); confirm upstream.
323 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
324 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
325  armnn::IWorkloadFactory& workloadFactory,
327  const armnn::ITensorHandleFactory& tensorHandleFactory,
329  float qScale = 1.0f,
330  int32_t qOffset = 0)
331 {
332  armnn::Pooling2dDescriptor descriptor;
334  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
335  descriptor.m_StrideX = descriptor.m_StrideY = 2;
337  descriptor.m_DataLayout = dataLayout;
338 
339  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
340  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
341 
342  // Set quantization parameters if the requested type is a quantized type.
343  if(armnn::IsQuantizedType<T>())
344  {
345  inputTensorInfo.SetQuantizationScale(qScale);
346  inputTensorInfo.SetQuantizationOffset(qOffset);
347  outputTensorInfo.SetQuantizationScale(qScale);
348  outputTensorInfo.SetQuantizationOffset(qOffset);
349  }
350 
351  std::vector<T> inputData(
352  QuantizedVector<T>({
353  2.0f, 2.0f, 6.0f, 6.0f,
354  4.0f, 4.0f, 8.0f, 8.0f,
355  10.0f, 12.0f, 14.0f, 16.0f,
356  10.0f, 12.0f, 16.0f, 14.0f,
357 
358  18.0f, 20.0f, 24.0f, 22.0f,
359  20.0f, 18.0f, 22.0f, 24.0f,
360  26.0f, 28.0f, 0.0f, 0.0f,
361  26.0f, 28.0f, 0.0f, 0.0f,
362  },
363  qScale, qOffset));
364 
// Expected means of each 2x2 window of the input above.
365  std::vector<T> outputData(
366  QuantizedVector<T>({
367  3.0f, 7.0f,
368  11.0f, 15.0f,
369 
370  19.0f, 23.0f,
371  27.0f, 0.0f,
372  },
373  qScale, qOffset));
374 
// Reference data above is NCHW; permute it in place when testing NHWC.
375  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
376  if (dataLayout == armnn::DataLayout::NHWC)
377  {
378  std::vector<T> tmp(inputData.size());
379  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
380  inputData = tmp;
381 
382  std::vector<T> tmp1(outputData.size());
383  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
384  outputData = tmp1;
385  }
386 
387  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
388 
389  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
390 
391  return SimplePooling2dTestImpl<ArmnnType>(
392  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
393 }
394 
// Average pooling over a large tensor (5x3x52x60) with a 100x100 pool, stride 5
// and 50-wide padding on all sides. All-ones input means every averaged window
// is also 1, so the expected output is a tensor of ones — exercising large
// shapes without hand-computed reference data.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 398),
// the m_PoolType assignment (orig. 404) and the PaddingMethod line (orig. 411)
// are missing from this listing; confirm against the repository source.
395 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
396 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
397  armnn::IWorkloadFactory& workloadFactory,
399  const armnn::ITensorHandleFactory& tensorHandleFactory,
400  float qScale = 1.0f,
401  int32_t qOffset = 0)
402 {
403  armnn::Pooling2dDescriptor descriptor;
405  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
406  descriptor.m_StrideX = descriptor.m_StrideY = 5;
407  descriptor.m_PadLeft = 50;
408  descriptor.m_PadRight = 50;
409  descriptor.m_PadTop = 50;
410  descriptor.m_PadBottom = 50;
412 
413  armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
414  armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
415 
416  // Set quantization parameters if the requested type is a quantized type.
417  if(armnn::IsQuantizedType<T>())
418  {
419  inputTensorInfo.SetQuantizationScale(qScale);
420  inputTensorInfo.SetQuantizationOffset(qOffset);
421  outputTensorInfo.SetQuantizationScale(qScale);
422  outputTensorInfo.SetQuantizationOffset(qOffset);
423  }
424 
// Fill the input entirely with ones; the average of any window of ones is one.
425  std::vector<T> inputVec;
426 
427  for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
428  {
429  inputVec.push_back(1);
430  }
431 
432  auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
433 
434  std::vector<T> outputVec;
435 
436  for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
437  {
438  outputVec.push_back(1);
439  }
440 
441  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
442 
443  return SimplePooling2dTestImpl<ArmnnType>(
444  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
445 }
446 
// L2 pooling, 2x2 pool, stride 2, 1x2x4x4 input -> 1x2x2x2 output.
// Each expected value is sqrt(mean of squares) of one 2x2 input window;
// inputs are chosen so the results are exact integers.
// NOTE(review): doc-extraction gaps — the memoryManager and dataLayout
// parameters (orig. 450, 452) and the m_PoolType assignment (orig. 457) are
// missing from this listing (dataLayout is used below); confirm upstream.
448 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
449 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
450  armnn::IWorkloadFactory& workloadFactory,
451  const armnn::ITensorHandleFactory& tensorHandleFactory,
453  float qScale = 1.0f,
454  int32_t qOffset = 0)
455 {
456  armnn::Pooling2dDescriptor descriptor;
458  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
459  descriptor.m_StrideX = descriptor.m_StrideY = 2;
461  descriptor.m_DataLayout = dataLayout;
462 
463  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
464  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
465 
466  std::vector<T> inputData(
467  QuantizedVector<T>({
468  1.0f, 7.0f, 5.0f, 5.0f,
469  1.0f, 7.0f, 5.0f, 5.0f,
470  3.0f, 3.0f, 1.0f, 1.0f,
471  3.0f, 3.0f, 1.0f, 1.0f,
472 
473  1.0f, 7.0f, 0.0f, 0.0f,
474  1.0f, 7.0f, 2.0f, 0.0f,
475  0.0f, 2.0f, 1.0f, 1.0f,
476  0.0f, 0.0f, 1.0f, 1.0f,
477  },
478  qScale, qOffset));
479 
// e.g. first window: sqrt((1+49+1+49)/4) = sqrt(25) = 5.
480  std::vector<T> outputData(
481  QuantizedVector<T>({
482  5.0f, 5.0f,
483  3.0f, 1.0f,
484 
485  5.0f, 1.0f,
486  1.0f, 1.0f,
487  },
488  qScale, qOffset));
489 
// Reference data above is NCHW; permute it in place when testing NHWC.
490  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
491  if (dataLayout == armnn::DataLayout::NHWC)
492  {
493  std::vector<T> tmp(inputData.size());
494  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
495  inputData = tmp;
496 
497  std::vector<T> tmp1(outputData.size());
498  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
499  outputData = tmp1;
500  }
501 
502  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
503 
504  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
505 
506  return SimplePooling2dTestImpl<ArmnnType>(
507  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
508 }
509 
// L2 pooling, 3x3 pool, stride 1, 1x1x4x4 input -> 1x1x2x2 output.
// Input values repeat so every 3x3 window has the same L2 mean (3.0).
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 513)
// and the m_PoolType assignment (orig. 519) are missing from this listing.
511 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
512 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
513  armnn::IWorkloadFactory& workloadFactory,
514  const armnn::ITensorHandleFactory& tensorHandleFactory,
515  float qScale = 1.0f,
516  int32_t qOffset = 0)
517 {
518  armnn::Pooling2dDescriptor descriptor;
520  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
521  descriptor.m_StrideX = descriptor.m_StrideY = 1;
523 
524  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
525  auto input = MakeTensor<T, 4>(inputTensorInfo,
526  QuantizedVector<T>({
527  2.0f, 1.0f, 5.0f, 2.0f,
528  1.0f, 2.0f, 2.0f, 1.0f,
529  5.0f, 4.0f, 1.0f, 5.0f,
530  2.0f, 1.0f, 5.0f, 2.0f,
531  },
532  qScale, qOffset));
533 
// Every 3x3 window sums-of-squares to 81 -> sqrt(81/9) = 3.
534  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
535  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
536  QuantizedVector<T>({
537  3.0f, 3.0f,
538  3.0f, 3.0f,
539  },
540  qScale, qOffset));
541 
542  return SimplePooling2dTestImpl<ArmnnType>(
543  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
544 }
545 
// L2 pooling, 3x3 pool, stride 3, 1x1x9x9 input -> 1x1x3x3 output.
// The input is a 3x3 tiling of the same base pattern, so every non-overlapping
// 3x3 window yields the same L2 mean (3.0).
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 549)
// and the m_PoolType assignment (orig. 555) are missing from this listing.
546 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
547 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
548  armnn::IWorkloadFactory& workloadFactory,
550  const armnn::ITensorHandleFactory& tensorHandleFactory,
551  float qScale = 1.0f,
552  int32_t qOffset = 0)
553 {
554  armnn::Pooling2dDescriptor descriptor;
556  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
557  descriptor.m_StrideX = descriptor.m_StrideY = 3;
559 
560  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
561  auto input = MakeTensor<T, 4>(inputTensorInfo,
562  QuantizedVector<T>({
563  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
564  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
565  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
566  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
567  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
568  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
569  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
570  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
571  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
572  },
573  qScale, qOffset));
574 
575  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
576  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
577  QuantizedVector<T>({
578  3.0f, 3.0f, 3.0f,
579  3.0f, 3.0f, 3.0f,
580  3.0f, 3.0f, 3.0f,
581  },
582  qScale, qOffset));
583 
584  return SimplePooling2dTestImpl<ArmnnType>(
585  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
586 }
587 
// L2 pooling, 3x3 pool, stride 4, 1x1x7x7 input -> 1x1x2x2 output.
// Zero rows/columns separate the four sampled 3x3 windows (the stride skips
// them), and each window contains the same pattern -> all outputs are 3.0.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 591)
// and the m_PoolType assignment (orig. 597) are missing from this listing.
589 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
590 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
591  armnn::IWorkloadFactory& workloadFactory,
592  const armnn::ITensorHandleFactory& tensorHandleFactory,
593  float qScale = 1.0f,
594  int32_t qOffset = 0)
595 {
596  armnn::Pooling2dDescriptor descriptor;
598  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
599  descriptor.m_StrideX = descriptor.m_StrideY = 4;
601 
602  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
603  auto input = MakeTensor<T, 4>(inputTensorInfo,
604  QuantizedVector<T>({
605  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
606  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
607  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
608  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
609  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
610  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
611  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
612  },
613  qScale, qOffset));
614 
615  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
616  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
617  QuantizedVector<T>({
618  3.0f, 3.0f,
619  3.0f, 3.0f,
620  },
621  qScale, qOffset));
622 
623  return SimplePooling2dTestImpl<ArmnnType>(
624  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
625 }
626 
// L2 pooling, 7x7 pool covering the whole 1x1x7x7 input -> single 1x1x1x1 output.
// The input values are chosen so the overall L2 mean is exactly 3.0.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 630)
// and the m_PoolType assignment (orig. 636) are missing from this listing.
628 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
629 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
630  armnn::IWorkloadFactory& workloadFactory,
631  const armnn::ITensorHandleFactory& tensorHandleFactory,
632  float qScale = 1.0f,
633  int32_t qOffset = 0)
634 {
635  armnn::Pooling2dDescriptor descriptor;
637  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
638  descriptor.m_StrideX = descriptor.m_StrideY = 7;
640 
641  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
642  auto input = MakeTensor<T, 4>(inputTensorInfo,
643  QuantizedVector<T>({
644  1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
645  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
646  0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
647  8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
648  0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
649  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
650  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
651  },
652  qScale, qOffset));
653 
654  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
655  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
656  QuantizedVector<T>({
657  3.0f,
658  },
659  qScale, qOffset));
660 
661  return SimplePooling2dTestImpl<ArmnnType>(
662  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
663 }
664 
// L2 pooling, 9x9 pool covering the whole 1x1x9x9 input -> single 1x1x1x1 output.
// Uses the same tiled base pattern as the 3x3-stride-3 test, so the global
// L2 mean is again exactly 3.0.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 668)
// and the m_PoolType assignment (orig. 674) are missing from this listing.
665 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
666 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
667  armnn::IWorkloadFactory& workloadFactory,
669  const armnn::ITensorHandleFactory& tensorHandleFactory,
670  float qScale = 1.0f,
671  int32_t qOffset = 0)
672 {
673  armnn::Pooling2dDescriptor descriptor;
675  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
676  descriptor.m_StrideX = descriptor.m_StrideY = 9;
678 
679  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
680  auto input = MakeTensor<T, 4>(inputTensorInfo,
681  QuantizedVector<T>({
682  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
683  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
684  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
685  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
686  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
687  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
688  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
689  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
690  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
691  },
692  qScale, qOffset));
693 
694  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
695  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
696  QuantizedVector<T>({
697  3.0f,
698  },
699  qScale, qOffset));
700 
701  return SimplePooling2dTestImpl<ArmnnType>(
702  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
703 }
704 
// Pooling with a non-square 2x3 pool, asymmetric strides (2,1) and asymmetric
// padding (left 2, top 1, bottom 2) on a 1x1x1x3 input -> 1x1x2x2 output.
// PaddingMethod::Exclude keeps padded elements out of the computation, and
// Floor rounding fixes the output shape; expected values were hand-calculated.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 708)
// and the m_PoolType assignment (orig. 717) are missing from this listing.
706 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
707 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
708  armnn::IWorkloadFactory& workloadFactory,
709  const armnn::ITensorHandleFactory& tensorHandleFactory,
710  float qScale = 1.0f,
711  int32_t qOffset = 0)
712 {
713  armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
714  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
715 
716  armnn::Pooling2dDescriptor descriptor;
718  descriptor.m_PoolWidth = 2;
719  descriptor.m_PoolHeight = 3;
720  descriptor.m_StrideX = 2;
721  descriptor.m_StrideY = 1;
722  descriptor.m_PadLeft = 2;
723  descriptor.m_PadRight = 0;
724  descriptor.m_PadTop = 1;
725  descriptor.m_PadBottom = 2;
726  descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
727  descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
728 
729  // Construct input data.
730  auto input = MakeTensor<T, 4>(inputTensorInfo,
731  QuantizedVector<T>({
732  1.0f, 3.0f, 4.0f,
733  },
734  qScale, qOffset));
735 
736  // These were calculated manually.
737  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
738  QuantizedVector<T>({
739  0.0f, 3.0f, 0.0f, 3.0f,
740  },
741  qScale, qOffset));
742 
743  return SimplePooling2dTestImpl<ArmnnType>(
744  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
745 }
746 
// Cross-backend comparison test: runs the same pooling (type chosen by
// poolingType, 3x3 pool, stride (2,4), no padding) over a seeded random
// 5x2x32x16 tensor on both the backend under test and a reference backend,
// returning both outputs in one LayerTestResult for the caller to compare.
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 750),
// the declarations of `data`/`info` (Pooling2dQueueDescriptor / WorkloadInfo,
// orig. 798-799) and one descriptor line (orig. 811) are missing from this
// listing; confirm against the repository source.
748 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
749 LayerTestResult<T, 4> ComparePooling2dTestCommon(
750  armnn::IWorkloadFactory& workloadFactory,
751  armnn::IWorkloadFactory& refWorkloadFactory,
752  const armnn::ITensorHandleFactory& tensorHandleFactory,
753  const armnn::ITensorHandleFactory& refTensorHandleFactory,
754  armnn::PoolingAlgorithm poolingType,
755  float qScale = 1.0f,
756  int32_t qOffset = 0)
757 {
758  IgnoreUnused(memoryManager);
759  const unsigned int inputWidth = 16;
760  const unsigned int inputHeight = 32;
761  const unsigned int channelCount = 2;
762  const unsigned int batchSize = 5;
763 
764  const unsigned int poolSize = 3;
765  const unsigned int strideX = 2;
766  const unsigned int strideY = 4;
767  const unsigned int padX = 0;
768  const unsigned int padY = 0;
769 
// Floor-rounded pooling output size: (in + 2*pad + stride - pool) / stride.
770  const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
771  const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
772 
773  armnn::TensorInfo inputTensorInfo;
774  armnn::TensorInfo outputTensorInfo;
775 
776  unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
777  unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
778 
779  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
780  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
781 
782  // Set quantization parameters if the requested type is a quantized type.
783  if(armnn::IsQuantizedType<T>())
784  {
785  inputTensorInfo.SetQuantizationScale(qScale);
786  inputTensorInfo.SetQuantizationOffset(qOffset);
787  outputTensorInfo.SetQuantizationScale(qScale);
788  outputTensorInfo.SetQuantizationOffset(qOffset);
789  }
790 
// Fixed seed keeps the random input reproducible across runs.
791  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
792 
793  LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
794 
795  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
796  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
797 
800  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
801  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
802  data.m_Parameters.m_PoolType = poolingType;
803  data.m_Parameters.m_PoolWidth = poolSize;
804  data.m_Parameters.m_PoolHeight = poolSize;
805  data.m_Parameters.m_StrideX = strideX;
806  data.m_Parameters.m_StrideY = strideY;
807  data.m_Parameters.m_PadLeft = padX;
808  data.m_Parameters.m_PadRight = padX;
809  data.m_Parameters.m_PadTop = padY;
810  data.m_Parameters.m_PadBottom = padY;
812 
813  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
814  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
815 
816  // Don't execute if Pooling is not supported, as an exception will be raised.
817  armnn::BackendId backend = workloadFactory.GetBackendId();
818  const size_t reasonIfUnsupportedMaxLen = 255;
819  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
820  comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
821  data.m_Parameters,
822  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
823  if (!comparisonResult.supported)
824  {
825  return comparisonResult;
826  }
827 
// Clone the queue descriptor for the reference backend, rebinding its
// input/output tensor handles to the reference factory's handles.
828  armnn::Pooling2dQueueDescriptor refData = data;
829  armnn::WorkloadInfo refInfo = info;
830  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
831  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
832 
833  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
834  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
835 
836  outputHandleRef->Allocate();
837  inputHandleRef->Allocate();
838  inputHandle->Allocate();
839  outputHandle->Allocate();
840 
// Both backends receive an identical copy of the same random input.
841  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
842  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
843 
844  workload->Execute();
845  workloadRef->Execute();
846 
// output = backend under test; outputExpected = reference backend.
847  CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
848  CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
849 
850  return comparisonResult;
851 }
852 
853 //
854 // Tests max pooling with the following parameters:
855 //
856 // Pooling size: 2x2
857 // Stride: (2,2)
858 // input size: 4x4
859 // channels: 1
860 // batch size: 1
861 //
// forceNoPadding toggles between no padding and a 3-wide left/right padding
// variant whose edge outputs are zero (the pool only sees padded zeros there).
// NOTE(review): doc-extraction gaps — the memoryManager parameter (orig. 865),
// the m_PoolType assignment (orig. 872) and the OutputShapeRounding/
// PaddingMethod lines (orig. 878-879) are missing from this listing.
862 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
863 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
864  armnn::IWorkloadFactory& workloadFactory,
866  const armnn::ITensorHandleFactory& tensorHandleFactory,
867  bool forceNoPadding,
868  float qScale = 1.0f,
869  int32_t qOffset = 0)
870 {
871  armnn::Pooling2dDescriptor descriptor;
873  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
874  descriptor.m_StrideX = 2;
875  descriptor.m_StrideY = 2;
876  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
877  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
880 
881 
882  unsigned int inputWidth = 4;
883 
884  unsigned int inputHeight = 4;
885 
// Floor-rounded pooling output size: (in + padL + padR + stride - pool) / stride.
886  unsigned int outputWidth =
887  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
888  descriptor.m_StrideX;
889  unsigned int outputHeight =
890  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
891  descriptor.m_StrideY;
892  unsigned int channels = 1;
893  unsigned int batchSize = 1;
894 
895  std::vector<float> inputData = {
896  510.0f, 222.0f, 780.0f, 654.0f,
897  141.0f, 276.0f, 15.0f, 546.0f,
898  303.0f, 618.0f, 582.0f, 339.0f,
899  438.0f, 564.0f, 573.0f, 402.0f
900  };
901 
902  // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
903  std::vector<float> expectedOutputDataWithPadding = {
904  0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
905  0.0f, 438.0f, 618.0f, 402.0f, 0.0f
906  };
907 
908  std::vector<float> expectedOutputDataNoPadding = {
909  510.0f, 780.0f,
910  618.0f, 582.0f
911  };
912 
913  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
914 
915  // Scale and offset should match input - we're just calculating maximum values.
916  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
917 
918  // Set quantization parameters if the requested type is a quantized type.
919  if(armnn::IsQuantizedType<T>())
920  {
921  inputTensorInfo.SetQuantizationScale(qScale);
922  inputTensorInfo.SetQuantizationOffset(qOffset);
923  outputTensorInfo.SetQuantizationScale(qScale);
924  outputTensorInfo.SetQuantizationOffset(qOffset);
925  }
926 
927  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
928 
929  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
930  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
931  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
932 
933  return SimplePooling2dTestImpl<ArmnnType>(
934  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
935 }
936 
937 //
938 // Tests max pooling with the following parameters:
939 //
940 // Pooling size: 3x2
941 // Stride: (2,2)
942 // input size: 3x2
943 // channels: 1
944 // batch size: 1
945 //
946 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
947 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
948  armnn::IWorkloadFactory& workloadFactory,
950  const armnn::ITensorHandleFactory& tensorHandleFactory,
951  bool forceNoPadding,
952  float qScale = 1.0f,
953  int32_t qOffset = 0)
954 {
955  armnn::Pooling2dDescriptor descriptor;
957  descriptor.m_PoolWidth = 3;
958  descriptor.m_PoolHeight = 2;
959  descriptor.m_StrideX = 2;
960  descriptor.m_StrideY = 2;
961  descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
962  descriptor.m_PadRight = descriptor.m_PadLeft;
963  descriptor.m_PadTop = 0;
964  descriptor.m_PadBottom = 0;
967 
968  unsigned int inputWidth = 3;
969  unsigned int inputHeight = 2;
970  unsigned int outputWidth =
971  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
972  descriptor.m_StrideX;
973  unsigned int outputHeight =
974  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
975  descriptor.m_StrideY;
976  unsigned int channels = 1;
977  unsigned int batchSize = 1;
978 
979  std::vector<float> inputData = {
980  3.0f, 6.0f, 9.0f,
981  12.0f, 15.0f, 18.0f,
982  };
983 
984  std::vector<float> expectedOutputDataWithPadding = {
985  6.0f, 8.0f,
986  };
987 
988  std::vector<float> expectedOutputDataNoPadding = {
989  10.5f,
990  };
991 
992  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
993 
994  // Scale and offset should match input - we're just calculating average values.
995  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
996 
997  // Set quantization parameters if the requested type is a quantized type.
998  if(armnn::IsQuantizedType<T>())
999  {
1000  inputTensorInfo.SetQuantizationScale(qScale);
1001  inputTensorInfo.SetQuantizationOffset(qOffset);
1002  outputTensorInfo.SetQuantizationScale(qScale);
1003  outputTensorInfo.SetQuantizationOffset(qOffset);
1004  }
1005 
1006  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
1007 
1008  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1009  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1010  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
1011 
1012  return SimplePooling2dTestImpl<ArmnnType>(
1013  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1014 }
1015 
1016 
1017 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1018 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1019  armnn::IWorkloadFactory& workloadFactory,
1021  const armnn::ITensorHandleFactory& tensorHandleFactory,
1022  float qScale = 1.0f,
1023  int32_t qOffset = 0)
1024 {
1025  armnn::Pooling2dDescriptor descriptor;
1027  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1028  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1029  descriptor.m_PadLeft = 1;
1030  descriptor.m_PadRight = 1;
1031  descriptor.m_PadTop = 1;
1032  descriptor.m_PadBottom = 1;
1034 
1035  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1036  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1037 
1038  // Set quantization parameters if the requested type is a quantized type.
1039  if(armnn::IsQuantizedType<T>())
1040  {
1041  inputTensorInfo.SetQuantizationScale(qScale);
1042  inputTensorInfo.SetQuantizationOffset(qOffset);
1043  outputTensorInfo.SetQuantizationScale(qScale);
1044  outputTensorInfo.SetQuantizationOffset(qOffset);
1045  }
1046 
1047  auto input = MakeTensor<T, 4>(inputTensorInfo,
1048  QuantizedVector<T>({
1049  -1.0f, -2.0f, 3.0f, 4.0f,
1050  -1.0f, -2.0f, 3.0f, 4.0f,
1051  1.0f, 2.0f, -3.0f, -4.0f,
1052  1.0f, 2.0f, -3.0f, -4.0f,
1053  },
1054  qScale, qOffset));
1055 
1056  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1057  QuantizedVector<T>({
1058  -1.0f, 3.0f, 4.0f,
1059  1.0f, 3.0f, 4.0f,
1060  1.0f, 2.0f, -4.0f,
1061  },
1062  qScale, qOffset));
1063 
1064  return SimplePooling2dTestImpl<ArmnnType>(
1065  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1066 }
1067 
1068 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1069 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1070  armnn::IWorkloadFactory& workloadFactory,
1072  const armnn::ITensorHandleFactory& tensorHandleFactory,
1073  float qScale = 1.0f,
1074  int32_t qOffset = 0)
1075 {
1076  armnn::Pooling2dDescriptor descriptor;
1078  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1079  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1080  descriptor.m_PadLeft = 1;
1081  descriptor.m_PadRight = 1;
1082  descriptor.m_PadTop = 1;
1083  descriptor.m_PadBottom = 1;
1085 
1086  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1087  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1088 
1089  // Set quantization parameters if the requested type is a quantized type.
1090  if(armnn::IsQuantizedType<T>())
1091  {
1092  inputTensorInfo.SetQuantizationScale(qScale);
1093  inputTensorInfo.SetQuantizationOffset(qOffset);
1094  outputTensorInfo.SetQuantizationScale(qScale);
1095  outputTensorInfo.SetQuantizationOffset(qOffset);
1096  }
1097 
1098  auto input = MakeTensor<T, 4>(inputTensorInfo,
1099  QuantizedVector<T>({
1100  -1.0f, -2.0f, 3.0f, 4.0f,
1101  -1.0f, -2.0f, 3.0f, 4.0f,
1102  1.0f, 2.0f, -3.0f, -4.0f,
1103  1.0f, 2.0f, -3.0f, -4.0f,
1104  },
1105  qScale, qOffset));
1106 
1107  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1108  QuantizedVector<T>({
1109  -1.0f, 3.0f, 4.0f, 4.0f,
1110  2.0f, 3.0f, 4.0f, 4.0f,
1111  2.0f, 3.0f, 4.0f, 4.0f,
1112  2.0f, 2.0f, 2.0f, -3.0f,
1113  },
1114  qScale, qOffset));
1115 
1116  return SimplePooling2dTestImpl<ArmnnType>(
1117  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1118 }
1119 
1120 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1121 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1122  armnn::IWorkloadFactory& workloadFactory,
1124  const armnn::ITensorHandleFactory& tensorHandleFactory,
1125  float qScale = 1.0f,
1126  int32_t qOffset = 0)
1127 {
1128  armnn::Pooling2dDescriptor descriptor;
1130  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1131  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1132  descriptor.m_PadLeft = 1;
1133  descriptor.m_PadRight = 1;
1134  descriptor.m_PadTop = 1;
1135  descriptor.m_PadBottom = 1;
1137 
1138  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1139  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1140 
1141  // Set quantization parameters if the requested type is a quantized type.
1142  if(armnn::IsQuantizedType<T>())
1143  {
1144  inputTensorInfo.SetQuantizationScale(qScale);
1145  inputTensorInfo.SetQuantizationOffset(qOffset);
1146  outputTensorInfo.SetQuantizationScale(qScale);
1147  outputTensorInfo.SetQuantizationOffset(qOffset);
1148  }
1149 
1150  auto input = MakeTensor<T, 4>(inputTensorInfo,
1151  QuantizedVector<T>({
1152  12.0f, 20.0f, 32.0f, 40.0f,
1153  12.0f, 20.0f, 32.0f, 40.0f,
1154  12.0f, 20.0f, 32.0f, 40.0f,
1155  12.0f, 20.0f, 32.0f, 40.0f,
1156  },
1157  qScale, qOffset));
1158 
1159  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1160  QuantizedVector<T>({
1161  3.0f, 13.0f, 10.0f,
1162  6.0f, 26.0f, 20.0f,
1163  3.0f, 13.0f, 10.0f,
1164  },
1165  qScale, qOffset));
1166 
1167  return SimplePooling2dTestImpl<ArmnnType>(
1168  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1169 }
1170 
1171 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1172 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1173  armnn::IWorkloadFactory& workloadFactory,
1175  const armnn::ITensorHandleFactory& tensorHandleFactory,
1176  float qScale = 1.0f,
1177  int32_t qOffset = 0)
1178 {
1179  armnn::Pooling2dDescriptor descriptor;
1181  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1182  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1183  descriptor.m_PadLeft = 0;
1184  descriptor.m_PadRight = 0;
1185  descriptor.m_PadTop = 0;
1186  descriptor.m_PadBottom = 0;
1189 
1190  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1191  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1192 
1193  // Set quantization parameters if the requested type is a quantized type.
1194  if(armnn::IsQuantizedType<T>())
1195  {
1196  inputTensorInfo.SetQuantizationScale(qScale);
1197  inputTensorInfo.SetQuantizationOffset(qOffset);
1198  outputTensorInfo.SetQuantizationScale(qScale);
1199  outputTensorInfo.SetQuantizationOffset(qOffset);
1200  }
1201 
1202  auto input = MakeTensor<T, 4>(inputTensorInfo,
1203  QuantizedVector<T>({
1204  1.0f, 2.0f, 3.0f, 4.0f,
1205  1.0f, 2.0f, 3.0f, 4.0f,
1206  1.0f, 2.0f, 3.0f, 4.0f,
1207  1.0f, 2.0f, 3.0f, 4.0f,
1208  },
1209  qScale, qOffset));
1210 
1211  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1212  QuantizedVector<T>({
1213  2.0f, 3.5f,
1214  2.0f, 3.5f
1215  },
1216  qScale, qOffset));
1217 
1218  return SimplePooling2dTestImpl<ArmnnType>(
1219  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1220 }
1221 
1222 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1223 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1224  armnn::IWorkloadFactory& workloadFactory,
1226  const armnn::ITensorHandleFactory& tensorHandleFactory,
1227  float qScale = 1.0f,
1228  int32_t qOffset = 0)
1229 {
1230  armnn::Pooling2dDescriptor descriptor;
1232  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1233  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1234  descriptor.m_PadLeft = 1;
1235  descriptor.m_PadRight = 1;
1236  descriptor.m_PadTop = 1;
1237  descriptor.m_PadBottom = 1;
1239 
1240  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1241  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1242 
1243  // Set quantization parameters if the requested type is a quantized type.
1244  if(armnn::IsQuantizedType<T>())
1245  {
1246  inputTensorInfo.SetQuantizationScale(qScale);
1247  inputTensorInfo.SetQuantizationOffset(qOffset);
1248  outputTensorInfo.SetQuantizationScale(qScale);
1249  outputTensorInfo.SetQuantizationOffset(qOffset);
1250  }
1251 
1252  auto input = MakeTensor<T, 4>(inputTensorInfo,
1253  QuantizedVector<T>({
1254  9.0f, 27.0f, 18.0f, 36.0f,
1255  18.0f, 9.0f, 18.0f, 9.0f,
1256  27.0f, 18.0f, 9.0f, 27.0f,
1257  9.0f, 27.0f, 9.0f, 18.0f,
1258  },
1259  qScale, qOffset));
1260 
1261  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1262  QuantizedVector<T>({
1263  7.0f, 11.0f, 13.0f, 9.0f,
1264  12.0f, 17.0f, 19.0f, 13.0f,
1265  12.0f, 16.0f, 16.0f, 10.0f,
1266  9.0f, 11.0f, 12.0f, 7.0f,
1267  },
1268  qScale, qOffset));
1269 
1270  return SimplePooling2dTestImpl<ArmnnType>(
1271  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1272 }
1273 
1274 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1275 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1276  armnn::IWorkloadFactory& workloadFactory,
1278  const armnn::ITensorHandleFactory& tensorHandleFactory,
1279  float qScale = 1.0f,
1280  int32_t qOffset = 0)
1281 {
1282  armnn::Pooling2dDescriptor descriptor;
1284  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1285  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1286  descriptor.m_PadLeft = 1;
1287  descriptor.m_PadRight = 1;
1288  descriptor.m_PadTop = 1;
1289  descriptor.m_PadBottom = 1;
1291 
1292  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1293  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1294 
1295  // Set quantization parameters if the requested type is a quantized type.
1296  if(armnn::IsQuantizedType<T>())
1297  {
1298  inputTensorInfo.SetQuantizationScale(qScale);
1299  inputTensorInfo.SetQuantizationOffset(qOffset);
1300  outputTensorInfo.SetQuantizationScale(qScale);
1301  outputTensorInfo.SetQuantizationOffset(qOffset);
1302  }
1303 
1304  auto input = MakeTensor<T, 4>(inputTensorInfo,
1305  QuantizedVector<T>({
1306  2.0f, 4.0f, 8.0f, 16.0f,
1307  4.0f, 2.0f, 2.0f, 4.0f,
1308  8.0f, 2.0f, 4.0f, 2.0f,
1309  16.0f, 2.0f, 2.0f, 8.0f,
1310  },
1311  qScale, qOffset));
1312 
1313  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1314  QuantizedVector<T>({
1315  1.0f, 4.4721f, 8.0f,
1316  4.4721f, 2.6457f, 2.236f,
1317  8.0f, 1.4142f, 4.0f,
1318  },
1319  qScale, qOffset));
1320 
1321  return SimplePooling2dTestImpl<ArmnnType>(
1322  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1323 }
1324 
1325 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1326 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1327  armnn::IWorkloadFactory& workloadFactory,
1329  const armnn::ITensorHandleFactory& tensorHandleFactory,
1330  float qScale = 1.0f,
1331  int32_t qOffset = 0)
1332 {
1333  armnn::Pooling2dDescriptor descriptor;
1335  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1336  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1337  descriptor.m_PadLeft = 1;
1338  descriptor.m_PadRight = 1;
1339  descriptor.m_PadTop = 1;
1340  descriptor.m_PadBottom = 1;
1342 
1343  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1344  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1345 
1346  // Set quantization parameters if the requested type is a quantized type.
1347  if(armnn::IsQuantizedType<T>())
1348  {
1349  inputTensorInfo.SetQuantizationScale(qScale);
1350  inputTensorInfo.SetQuantizationOffset(qOffset);
1351  outputTensorInfo.SetQuantizationScale(qScale);
1352  outputTensorInfo.SetQuantizationOffset(qOffset);
1353  }
1354 
1355  auto input = MakeTensor<T, 4>(inputTensorInfo,
1356  QuantizedVector<T>({
1357  1.0f, 2.0f, 3.0f, 4.0f,
1358  1.0f, 2.0f, 3.0f, 4.0f,
1359  1.0f, 2.0f, 3.0f, 4.0f,
1360  1.0f, 2.0f, 3.0f, 4.0f,
1361  },
1362  qScale, qOffset));
1363 
1364  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1365  QuantizedVector<T>({
1366  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1367  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1368  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1369  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1370  },
1371  qScale, qOffset));
1372 
1373  return SimplePooling2dTestImpl<ArmnnType>(
1374  workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
1375 }
1376 
1377 } // anonymous namespace
1378 
1380  armnn::IWorkloadFactory& workloadFactory,
1382  const armnn::ITensorHandleFactory& tensorHandleFactory,
1383  bool forceNoPadding)
1384 {
1385  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1386  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1387 }
1388 
1390  armnn::IWorkloadFactory& workloadFactory,
1392  const armnn::ITensorHandleFactory& tensorHandleFactory,
1393  bool forceNoPadding)
1394 {
1395  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1396  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
1397 }
1398 
1400  armnn::IWorkloadFactory& workloadFactory,
1402  const armnn::ITensorHandleFactory& tensorHandleFactory,
1403  bool forceNoPadding)
1404 {
1405  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1406  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1407 }
1408 
1410  armnn::IWorkloadFactory& workloadFactory,
1412  const armnn::ITensorHandleFactory& tensorHandleFactory,
1413  bool forceNoPadding)
1414 {
1415  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1416  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1417 }
1418 
1420  armnn::IWorkloadFactory& workloadFactory,
1422  const armnn::ITensorHandleFactory& tensorHandleFactory,
1423  bool forceNoPadding)
1424 {
1425  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1426  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
1427 }
1428 
1430  armnn::IWorkloadFactory& workloadFactory,
1432  const armnn::ITensorHandleFactory& tensorHandleFactory,
1433  bool forceNoPadding)
1434 {
1435  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1436  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1437 }
1438 
1440  armnn::IWorkloadFactory& workloadFactory,
1442  const armnn::ITensorHandleFactory& tensorHandleFactory,
1443  const armnn::DataLayout dataLayout)
1444 {
1445  return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1446  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1447 }
1448 
1450  armnn::IWorkloadFactory& workloadFactory,
1452  const armnn::ITensorHandleFactory& tensorHandleFactory,
1453  const armnn::DataLayout dataLayout)
1454 {
1455  return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1456  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1457 }
1458 
1460  armnn::IWorkloadFactory& workloadFactory,
1462  const armnn::ITensorHandleFactory& tensorHandleFactory,
1463  const armnn::DataLayout dataLayout)
1464 {
1465  return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1466  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1467 }
1469  armnn::IWorkloadFactory& workloadFactory,
1471  const armnn::ITensorHandleFactory& tensorHandleFactory)
1472 {
1473  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1474  workloadFactory, memoryManager, tensorHandleFactory);
1475 }
1476 
1478  armnn::IWorkloadFactory& workloadFactory,
1480  const armnn::ITensorHandleFactory& tensorHandleFactory)
1481 {
1482  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1483  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1484 }
1485 
1487  armnn::IWorkloadFactory& workloadFactory,
1489  const armnn::ITensorHandleFactory& tensorHandleFactory)
1490 {
1491  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1492  workloadFactory, memoryManager, tensorHandleFactory);
1493 }
1494 
1496  armnn::IWorkloadFactory& workloadFactory,
1498  const armnn::ITensorHandleFactory& tensorHandleFactory)
1499 {
1500  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1501  workloadFactory, memoryManager, tensorHandleFactory);
1502 }
1503 
1505  armnn::IWorkloadFactory& workloadFactory,
1507  const armnn::ITensorHandleFactory& tensorHandleFactory)
1508 {
1509  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1510  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
1511 }
1512 
1514  armnn::IWorkloadFactory& workloadFactory,
1516  const armnn::ITensorHandleFactory& tensorHandleFactory)
1517 {
1518  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1519  workloadFactory, memoryManager, tensorHandleFactory);
1520 }
1521 
1523  armnn::IWorkloadFactory& workloadFactory,
1525  const armnn::ITensorHandleFactory& tensorHandleFactory,
1526  const armnn::DataLayout dataLayout)
1527 {
1528  return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1529  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1530 }
1531 
1533  armnn::IWorkloadFactory& workloadFactory,
1535  const armnn::ITensorHandleFactory& tensorHandleFactory,
1536  const armnn::DataLayout dataLayout)
1537 {
1538  return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1539  workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
1540 }
1541 
1543  armnn::IWorkloadFactory& workloadFactory,
1545  const armnn::ITensorHandleFactory& tensorHandleFactory,
1546  const armnn::DataLayout dataLayout)
1547 {
1548  return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1549  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1550 }
1551 
1553  armnn::IWorkloadFactory& workloadFactory,
1555  const armnn::ITensorHandleFactory& tensorHandleFactory,
1556  bool forceNoPadding)
1557 {
1558  return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1559  workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
1560 }
1561 
1563  armnn::IWorkloadFactory& workloadFactory,
1565  const armnn::ITensorHandleFactory& tensorHandleFactory)
1566 {
1567  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1568  workloadFactory, memoryManager, tensorHandleFactory);
1569 }
1570 
1572  armnn::IWorkloadFactory& workloadFactory,
1574  const armnn::ITensorHandleFactory& tensorHandleFactory)
1575 {
1576  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1577  workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
1578 }
1579 
1581  armnn::IWorkloadFactory& workloadFactory,
1583  const armnn::ITensorHandleFactory& tensorHandleFactory)
1584 {
1585  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1586  workloadFactory, memoryManager, tensorHandleFactory);
1587 }
1589  armnn::IWorkloadFactory& workloadFactory,
1591  const armnn::ITensorHandleFactory& tensorHandleFactory)
1592 {
1593  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1594  workloadFactory, memoryManager, tensorHandleFactory);
1595 }
1596 
1598  armnn::IWorkloadFactory& workloadFactory,
1600  const armnn::ITensorHandleFactory& tensorHandleFactory)
1601 {
1602  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1603  workloadFactory, memoryManager, tensorHandleFactory);
1604 }
1605 
1607  armnn::IWorkloadFactory& workloadFactory,
1609  const armnn::ITensorHandleFactory& tensorHandleFactory)
1610 {
1611  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1612  workloadFactory, memoryManager, tensorHandleFactory);
1613 }
1614 
1616  armnn::IWorkloadFactory& workloadFactory,
1618  const armnn::ITensorHandleFactory& tensorHandleFactory)
1619 {
1620  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1621  workloadFactory, memoryManager, tensorHandleFactory);
1622 }
1623 
1625  armnn::IWorkloadFactory& workloadFactory,
1627  const armnn::ITensorHandleFactory& tensorHandleFactory)
1628 {
1629  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1630  workloadFactory, memoryManager, tensorHandleFactory);
1631 }
1632 
1634  armnn::IWorkloadFactory& workloadFactory,
1636  const armnn::ITensorHandleFactory& tensorHandleFactory)
1637 {
1638  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1639  workloadFactory, memoryManager, tensorHandleFactory);
1640 }
1641 
1643  armnn::IWorkloadFactory& workloadFactory,
1645  const armnn::ITensorHandleFactory& tensorHandleFactory)
1646 {
1647  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1648  workloadFactory, memoryManager, tensorHandleFactory);
1649 }
1650 
1652  armnn::IWorkloadFactory& workloadFactory,
1654  const armnn::ITensorHandleFactory& tensorHandleFactory)
1655 {
1656  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1657  workloadFactory, memoryManager, tensorHandleFactory);
1658 }
1659 
1661  armnn::IWorkloadFactory& workloadFactory,
1663  const armnn::ITensorHandleFactory& tensorHandleFactory)
1664 {
1665  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1666  workloadFactory, memoryManager, tensorHandleFactory);
1667 }
1668 
1670  armnn::IWorkloadFactory& workloadFactory,
1672  const armnn::ITensorHandleFactory& tensorHandleFactory,
1673  const armnn::DataLayout dataLayout)
1674 {
1675  return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1676  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1677 }
1678 
1680  armnn::IWorkloadFactory& workloadFactory,
1682  const armnn::ITensorHandleFactory& tensorHandleFactory,
1683  const armnn::DataLayout dataLayout)
1684 {
1685  return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1686  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1687 }
1688 
1690  armnn::IWorkloadFactory& workloadFactory,
1692  const armnn::ITensorHandleFactory& tensorHandleFactory,
1693  const armnn::DataLayout dataLayout)
1694 {
1695  return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1696  workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
1697 }
1698 
1700  armnn::IWorkloadFactory& workloadFactory,
1702  const armnn::ITensorHandleFactory& tensorHandleFactory)
1703 {
1704  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1705  workloadFactory, memoryManager, tensorHandleFactory);
1706 }
1707 
1709  armnn::IWorkloadFactory& workloadFactory,
1711  const armnn::ITensorHandleFactory& tensorHandleFactory)
1712 {
1713  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1714  workloadFactory, memoryManager, tensorHandleFactory);
1715 }
1716 
1718  armnn::IWorkloadFactory& workloadFactory,
1720  const armnn::ITensorHandleFactory& tensorHandleFactory)
1721 {
1722  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1723  workloadFactory, memoryManager, tensorHandleFactory);
1724 }
1725 
1727  armnn::IWorkloadFactory& workloadFactory,
1729  const armnn::ITensorHandleFactory& tensorHandleFactory)
1730 {
1731  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1732  workloadFactory, memoryManager, tensorHandleFactory);
1733 }
1734 
1736  armnn::IWorkloadFactory& workloadFactory,
1738  const armnn::ITensorHandleFactory& tensorHandleFactory)
1739 {
1740  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1741  workloadFactory, memoryManager, tensorHandleFactory);
1742 }
1743 
1745  armnn::IWorkloadFactory& workloadFactory,
1747  const armnn::ITensorHandleFactory& tensorHandleFactory)
1748 {
1749  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1750  workloadFactory, memoryManager, tensorHandleFactory);
1751 }
1753  armnn::IWorkloadFactory& workloadFactory,
1755  const armnn::ITensorHandleFactory& tensorHandleFactory)
1756 {
1757  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1758  workloadFactory, memoryManager, tensorHandleFactory);
1759 }
1760 
1762  armnn::IWorkloadFactory& workloadFactory,
1764  const armnn::ITensorHandleFactory& tensorHandleFactory)
1765 {
1766  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1767  workloadFactory, memoryManager, tensorHandleFactory);
1768 }
1769 
1771  armnn::IWorkloadFactory& workloadFactory,
1773  const armnn::ITensorHandleFactory& tensorHandleFactory)
1774 {
1775  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1776  workloadFactory, memoryManager, tensorHandleFactory);
1777 }
1778 
1780  armnn::IWorkloadFactory& workloadFactory,
1782  const armnn::ITensorHandleFactory& tensorHandleFactory)
1783 {
1784  return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1785  workloadFactory, memoryManager, tensorHandleFactory);
1786 }
1787 
1789  armnn::IWorkloadFactory& workloadFactory,
1791  const armnn::ITensorHandleFactory& tensorHandleFactory)
1792 {
1793  return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1794  workloadFactory, memoryManager, tensorHandleFactory);
1795 }
1796 
1798  armnn::IWorkloadFactory& workloadFactory,
1800  const armnn::ITensorHandleFactory& tensorHandleFactory)
1801 {
1802  return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1803  workloadFactory, memoryManager, tensorHandleFactory);
1804 }
1805 
1807  armnn::IWorkloadFactory& workloadFactory,
1809  const armnn::ITensorHandleFactory& tensorHandleFactory)
1810 {
1811  return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1812  workloadFactory, memoryManager, tensorHandleFactory);
1813 }
1814 
1816  armnn::IWorkloadFactory& workloadFactory,
1818  const armnn::ITensorHandleFactory& tensorHandleFactory)
1819 {
1820  return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1821  workloadFactory, memoryManager, tensorHandleFactory);
1822 }
1823 
1825  armnn::IWorkloadFactory& workloadFactory,
1827  const armnn::ITensorHandleFactory& tensorHandleFactory)
1828 {
1829  return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1830  workloadFactory, memoryManager, tensorHandleFactory);
1831 }
1833  armnn::IWorkloadFactory& workloadFactory,
1835  const armnn::ITensorHandleFactory& tensorHandleFactory)
1836 {
1837  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1838  workloadFactory, memoryManager, tensorHandleFactory);
1839 }
1840 
1842  armnn::IWorkloadFactory& workloadFactory,
1844  const armnn::ITensorHandleFactory& tensorHandleFactory)
1845 {
1846  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1847  workloadFactory, memoryManager, tensorHandleFactory);
1848 }
1849 
1851  armnn::IWorkloadFactory& workloadFactory,
1853  const armnn::ITensorHandleFactory& tensorHandleFactory)
1854 {
1855  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1856  workloadFactory, memoryManager, tensorHandleFactory);
1857 }
1858 
1860  armnn::IWorkloadFactory& workloadFactory,
1862  const armnn::ITensorHandleFactory& tensorHandleFactory)
1863 {
1864  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1865  workloadFactory, memoryManager, tensorHandleFactory);
1866 }
1867 
1869  armnn::IWorkloadFactory& workloadFactory,
1871  const armnn::ITensorHandleFactory& tensorHandleFactory)
1872 {
1873  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1874  workloadFactory, memoryManager, tensorHandleFactory);
1875 }
1876 
1878  armnn::IWorkloadFactory& workloadFactory,
1880  const armnn::ITensorHandleFactory& tensorHandleFactory)
1881 {
1882  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1883  workloadFactory, memoryManager, tensorHandleFactory);
1884 }
1885 
1887  armnn::IWorkloadFactory& workloadFactory,
1889  const armnn::ITensorHandleFactory& tensorHandleFactory)
1890 {
1891  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1892  workloadFactory, memoryManager, tensorHandleFactory);
1893 }
1894 
1896  armnn::IWorkloadFactory& workloadFactory,
1898  const armnn::ITensorHandleFactory& tensorHandleFactory)
1899 {
1900  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1901  workloadFactory, memoryManager, tensorHandleFactory);
1902 }
1903 
1905  armnn::IWorkloadFactory& workloadFactory,
1907  const armnn::ITensorHandleFactory& tensorHandleFactory)
1908 {
1909  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1910  workloadFactory, memoryManager, tensorHandleFactory);
1911 }
1912 
1914  armnn::IWorkloadFactory& workloadFactory,
1916  armnn::IWorkloadFactory& refWorkloadFactory,
1917  const armnn::ITensorHandleFactory& tensorHandleFactory,
1918  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1919  armnn::PoolingAlgorithm poolingType)
1920 {
1921  return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1922  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1923 }
1924 
1926  armnn::IWorkloadFactory& workloadFactory,
1928  armnn::IWorkloadFactory& refWorkloadFactory,
1929  const armnn::ITensorHandleFactory& tensorHandleFactory,
1930  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1931  armnn::PoolingAlgorithm poolingType)
1932 {
1933  return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1934  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1935  poolingType, 0.1f, 128);
1936 }
1937 
1939  armnn::IWorkloadFactory& workloadFactory,
1941  armnn::IWorkloadFactory& refWorkloadFactory,
1942  const armnn::ITensorHandleFactory& tensorHandleFactory,
1943  const armnn::ITensorHandleFactory& refTensorHandleFactory,
1944  armnn::PoolingAlgorithm poolingType)
1945 {
1946  return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1947  workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
1948 }
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified...
Definition: Tensor.cpp:181
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout
Definition: Types.hpp:50
unsigned int GetWidthIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
PoolingAlgorithm
Definition: Types.hpp:104
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetHeightIndex() const
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
uint32_t m_PadRight
Padding right value in the width dimension.
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:464
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory, armnn::PoolingAlgorithm poolingType)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
The padding fields count, but are ignored.
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:35
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:480
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, bool forceNoPadding)
unsigned int GetChannelsIndex() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)