ArmNN
 20.08
Pooling2dTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Pooling2dTestImpl.hpp"
7 
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10 
11 #include <armnn/LayerSupport.hpp>
12 
15 #include <armnnUtils/Permute.hpp>
16 
18 
20 
23 
24 #include <test/TensorHelpers.hpp>
25 
26 #include <boost/numeric/conversion/cast.hpp>
27 
28 namespace
29 {
30 
31 using namespace armnnUtils;
32 
// Shared driver for all Pooling2d layer tests: builds a Pooling2d workload from
// 'descriptor' on 'workloadFactory', runs it on 'input', and returns a
// LayerTestResult holding the actual output plus 'outputExpected'. If the backend
// reports the configuration unsupported, returns early with result.supported == false
// and never executes. The data layout (NCHW/NHWC) is taken from descriptor.m_DataLayout.
// NOTE(review): this listing elides some source lines (e.g. the memoryManager
// parameter and the input/output TensorInfo declarations) — verify against full source.
33 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
34 LayerTestResult<T, 4> SimplePooling2dTestImpl(
35  armnn::IWorkloadFactory& workloadFactory,
37  armnn::Pooling2dDescriptor descriptor,
38  float qScale,
39  int32_t qOffset,
40  const boost::multi_array<T, 4>& input,
41  const boost::multi_array<T, 4>& outputExpected)
42 {
43  IgnoreUnused(memoryManager);
// Resolve which tensor dimensions hold H/W/C for the requested data layout.
44  const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
45  const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
46  auto heightIndex = dimensionIndices.GetHeightIndex();
47  auto widthIndex = dimensionIndices.GetWidthIndex();
48  auto channelsIndex = dimensionIndices.GetChannelsIndex();
49 
// Derive tensor shapes from the supplied multi_arrays rather than taking them
// as parameters, so callers only describe data once.
50  unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
51  unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
52  unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
53  unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
54 
55  unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
56  unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
57  unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
58  unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
59 
61  inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
62 
64  outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
65 
66  // Set quantization parameters if the requested type is a quantized type.
67  if(armnn::IsQuantizedType<T>())
68  {
69  inputTensorInfo.SetQuantizationScale(qScale);
70  inputTensorInfo.SetQuantizationOffset(qOffset);
71  outputTensorInfo.SetQuantizationScale(qScale);
72  outputTensorInfo.SetQuantizationOffset(qOffset);
73  }
74 
75  LayerTestResult<T, 4> result(outputTensorInfo);
76 
78  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
79  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
81 
// Force the queue descriptor's layout to match the one we indexed shapes with,
// in case the caller's descriptor and data disagree.
82  armnn::Pooling2dQueueDescriptor queueDescriptor;
83  queueDescriptor.m_Parameters = descriptor;
84  queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
85 
86  armnn::WorkloadInfo workloadInfo;
87  AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
88  AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
89 
90  // Don't execute if Pooling is not supported, as an exception will be raised.
91  armnn::BackendId backend = workloadFactory.GetBackendId();
92  const size_t reasonIfUnsupportedMaxLen = 255;
93  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
94  result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
95  queueDescriptor.m_Parameters,
96  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
97  if (!result.supported)
98  {
99  return result;
100  }
101 
102  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
103 
104  inputHandle->Allocate();
105  outputHandle->Allocate();
106 
107  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
108 
109  workload->Execute();
110 
// Read back the computed output; the caller's framework compares it
// against result.outputExpected.
111  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
112 
113  result.outputExpected = outputExpected;
114 
115  return result;
116 }
117 
118 //
119 // Tests max pooling with the following parameters:
120 //
121 // Pooling size: 3x3
122 // Stride: (2,4)
123 // input size: 8x13
124 // channels: 2
125 // batch size: 2
126 //
// NOTE(review): this listing elides the memoryManager parameter and the
// m_PoolType / rounding-method assignment lines — verify against full source.
127 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
128 LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
129  armnn::IWorkloadFactory& workloadFactory,
131  bool forceNoPadding,
132  float qScale = 1.0f,
133  int32_t qOffset = 0)
134 {
135  armnn::Pooling2dDescriptor descriptor;
137  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
138  descriptor.m_StrideX = 2;
139  descriptor.m_StrideY = 4;
140  // forceNoPadding is mainly used for compatibility with ARM Compute.
141  // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
142  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
143  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
146 
// Output extents follow the standard pooling formula:
// (in + padBefore + padAfter + stride - pool) / stride.
147  unsigned int inputWidth = 8;
148  unsigned int inputHeight = 13;
149  unsigned int outputWidth =
150  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
151  descriptor.m_StrideX;
152  unsigned int outputHeight =
153  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
154  descriptor.m_StrideY;
155  unsigned int channels = 2;
156  unsigned int batchSize = 2;
157 
158  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
159  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
160 
161  // Set quantization parameters if the requested type is a quantized type.
162  if(armnn::IsQuantizedType<T>())
163  {
164  inputTensorInfo.SetQuantizationScale(qScale);
165  inputTensorInfo.SetQuantizationOffset(qOffset);
166  outputTensorInfo.SetQuantizationScale(qScale);
167  outputTensorInfo.SetQuantizationOffset(qOffset);
168  }
169 
170  std::vector<float> singleChannelData({
171  0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
172  1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
173  8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
174  8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
175  5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
176  1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
177  9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
178  1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
179  6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
180  8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
181  7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
182  4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
183  3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
184  });
185 
186  // Constructs input data.
187  std::vector<float> inputData;
188  auto negator = [](float f) { return -f; };
189 
190  // First image (two channels where the second channel is the negative of the first one).
191  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
192  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
193 
194  // Second image (same as first image).
195  inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
196  std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
197 
198  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
199 
200  // These were calculated manually.
// The padded variant has a wider output (padding adds columns) and includes
// zeros where the pool window covers only padding.
201  auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
202  boost::multi_array<T, 4> outputExpected(shape);
203  if (forceNoPadding)
204  {
205  outputExpected = MakeTensor<T, 4>(outputTensorInfo,
206  QuantizedVector<T>({
207  8.0f, 8.0f, 8.0f,
208  9.0f, 7.0f, 9.0f,
209  9.0f, 9.0f, 9.0f,
210 
211  0.0f, 0.0f, -3.0f,
212  -1.0f, 0.0f, 0.0f,
213  -1.0f, -1.0f, -1.0f,
214 
215  8.0f, 8.0f, 8.0f,
216  9.0f, 7.0f, 9.0f,
217  9.0f, 9.0f, 9.0f,
218 
219  0.0f, 0.0f, -3.0f,
220  -1.0f, 0.0f, 0.0f,
221  -1.0f, -1.0f, -1.0f
222  },
223  qScale, qOffset));
224  }
225  else
226  {
227  outputExpected = MakeTensor<T, 4>(outputTensorInfo,
228  QuantizedVector<T>({
229  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
230  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
231  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
232 
233  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
234  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
235  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,
236 
237  0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
238  0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
239  0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
240 
241  0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
242  0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
243  0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
244  },
245  qScale, qOffset));
246  }
247 
248  return SimplePooling2dTestImpl<ArmnnType>(
249  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
250 }
251 
// 2x2 max pooling, stride 2, over a 1x2x4x4 tensor; each 2x2 window's maximum
// is the expected output. Supports both NCHW and NHWC by permuting the
// NCHW-authored reference data when NHWC is requested.
// NOTE(review): the memoryManager parameter and m_PoolType assignment lines are
// elided in this listing — verify against full source.
252 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
253 LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
254  armnn::IWorkloadFactory& workloadFactory,
256  const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
257  float qScale = 1.0f,
258  int32_t qOffset = 0)
259 {
260  armnn::Pooling2dDescriptor descriptor;
262  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
263  descriptor.m_StrideX = descriptor.m_StrideY = 2;
265  descriptor.m_DataLayout = dataLayout;
266 
267  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
268  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
269 
270  // Set quantization parameters if the requested type is a quantized type.
271  if(armnn::IsQuantizedType<T>())
272  {
273  inputTensorInfo.SetQuantizationScale(qScale);
274  inputTensorInfo.SetQuantizationOffset(qOffset);
275  outputTensorInfo.SetQuantizationScale(qScale);
276  outputTensorInfo.SetQuantizationOffset(qOffset);
277  }
278 
// Data below is authored in NCHW order; each 2x2 block's max appears in outputData.
279  std::vector<T> inputData(
280  QuantizedVector<T>({
281  1.0f, 2.0f, 5.0f, 6.0f,
282  3.0f, 4.0f, 7.0f, 8.0f,
283  9.0f, 10.0f, 13.0f, 14.0f,
284  11.0f, 12.0f, 15.0f, 16.0f,
285 
286  17.0f, 18.0f, 21.0f, 22.0f,
287  19.0f, 20.0f, 23.0f, 24.0f,
288  25.0f, 26.0f, 29.0f, 30.0f,
289  27.0f, 28.0f, 31.0f, 32.0f,
290  },
291  qScale, qOffset));
292 
293  std::vector<T> outputData(
294  QuantizedVector<T>({
295  4.0f, 8.0f,
296  12.0f, 16.0f,
297 
298  20.0f, 24.0f,
299  28.0f, 32.0f,
300  },
301  qScale, qOffset));
302 
303  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
304  if (dataLayout == armnn::DataLayout::NHWC)
305  {
306  std::vector<T> tmp(inputData.size());
307  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
308  inputData = tmp;
309 
310  std::vector<T> tmp1(outputData.size());
311  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
312  outputData = tmp1;
313  }
314 
315  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
316 
317  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
318 
319  return SimplePooling2dTestImpl<ArmnnType>(
320  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
321 }
322 
// 2x2 average pooling, stride 2, over a 1x2x4x4 tensor; each expected value is
// the mean of one 2x2 window. Mirrors SimpleMaxPooling2dTestCommon's structure,
// including the NCHW->NHWC permutation of reference data.
// NOTE(review): the memoryManager and dataLayout parameter lines are elided in
// this listing (the body uses 'dataLayout') — verify against full source.
323 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
324 LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
325  armnn::IWorkloadFactory& workloadFactory,
328  float qScale = 1.0f,
329  int32_t qOffset = 0)
330 {
331  armnn::Pooling2dDescriptor descriptor;
333  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
334  descriptor.m_StrideX = descriptor.m_StrideY = 2;
336  descriptor.m_DataLayout = dataLayout;
337 
338  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
339  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
340 
341  // Set quantization parameters if the requested type is a quantized type.
342  if(armnn::IsQuantizedType<T>())
343  {
344  inputTensorInfo.SetQuantizationScale(qScale);
345  inputTensorInfo.SetQuantizationOffset(qOffset);
346  outputTensorInfo.SetQuantizationScale(qScale);
347  outputTensorInfo.SetQuantizationOffset(qOffset);
348  }
349 
350  std::vector<T> inputData(
351  QuantizedVector<T>({
352  2.0f, 2.0f, 6.0f, 6.0f,
353  4.0f, 4.0f, 8.0f, 8.0f,
354  10.0f, 12.0f, 14.0f, 16.0f,
355  10.0f, 12.0f, 16.0f, 14.0f,
356 
357  18.0f, 20.0f, 24.0f, 22.0f,
358  20.0f, 18.0f, 22.0f, 24.0f,
359  26.0f, 28.0f, 0.0f, 0.0f,
360  26.0f, 28.0f, 0.0f, 0.0f,
361  },
362  qScale, qOffset));
363 
// Each value is the average of the corresponding 2x2 input window.
364  std::vector<T> outputData(
365  QuantizedVector<T>({
366  3.0f, 7.0f,
367  11.0f, 15.0f,
368 
369  19.0f, 23.0f,
370  27.0f, 0.0f,
371  },
372  qScale, qOffset));
373 
374  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
375  if (dataLayout == armnn::DataLayout::NHWC)
376  {
377  std::vector<T> tmp(inputData.size());
378  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
379  inputData = tmp;
380 
381  std::vector<T> tmp1(outputData.size());
382  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
383  outputData = tmp1;
384  }
385 
386  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
387 
388  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
389 
390  return SimplePooling2dTestImpl<ArmnnType>(
391  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
392 }
393 
// Stress-tests average pooling with a very large kernel (100x100) and heavy
// padding (50 on every edge) over a 5x3x52x60 all-ones input. Averaging a
// constant-1 tensor must yield all ones, so the expected output is also all ones
// regardless of window position — this isolates large-kernel handling from
// arithmetic verification.
394 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
395 LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
396  armnn::IWorkloadFactory& workloadFactory,
398  float qScale = 1.0f,
399  int32_t qOffset = 0)
400 {
401  armnn::Pooling2dDescriptor descriptor;
403  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
404  descriptor.m_StrideX = descriptor.m_StrideY = 5;
405  descriptor.m_PadLeft = 50;
406  descriptor.m_PadRight = 50;
407  descriptor.m_PadTop = 50;
408  descriptor.m_PadBottom = 50;
410 
411  armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
412  armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
413 
414  // Set quantization parameters if the requested type is a quantized type.
415  if(armnn::IsQuantizedType<T>())
416  {
417  inputTensorInfo.SetQuantizationScale(qScale);
418  inputTensorInfo.SetQuantizationOffset(qOffset);
419  outputTensorInfo.SetQuantizationScale(qScale);
420  outputTensorInfo.SetQuantizationOffset(qOffset);
421  }
422 
// Fill input entirely with 1s; the average of any window of 1s is 1.
423  std::vector<T> inputVec;
424 
425  for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
426  {
427  inputVec.push_back(1);
428  }
429 
430  auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
431 
432  std::vector<T> outputVec;
433 
434  for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
435  {
436  outputVec.push_back(1);
437  }
438 
439  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
440 
441  return SimplePooling2dTestImpl<ArmnnType>(
442  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
443 }
444 
// 2x2 L2 pooling, stride 2, over a 1x2x4x4 tensor, with NCHW/NHWC support via
// permutation of the NCHW-authored reference data. Input windows are built from
// repeated values so the expected L2 results are exact.
// NOTE(review): unlike the max/average variants above, this function does not set
// quantization scale/offset on the tensor infos — confirm that is intentional.
445 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
446 LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
447  armnn::IWorkloadFactory& workloadFactory,
450  float qScale = 1.0f,
451  int32_t qOffset = 0)
452 {
453  armnn::Pooling2dDescriptor descriptor;
455  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
456  descriptor.m_StrideX = descriptor.m_StrideY = 2;
458  descriptor.m_DataLayout = dataLayout;
459 
460  armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
461  armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
462 
463  std::vector<T> inputData(
464  QuantizedVector<T>({
465  1.0f, 7.0f, 5.0f, 5.0f,
466  1.0f, 7.0f, 5.0f, 5.0f,
467  3.0f, 3.0f, 1.0f, 1.0f,
468  3.0f, 3.0f, 1.0f, 1.0f,
469 
470  1.0f, 7.0f, 0.0f, 0.0f,
471  1.0f, 7.0f, 2.0f, 0.0f,
472  0.0f, 2.0f, 1.0f, 1.0f,
473  0.0f, 0.0f, 1.0f, 1.0f,
474  },
475  qScale, qOffset));
476 
477  std::vector<T> outputData(
478  QuantizedVector<T>({
479  5.0f, 5.0f,
480  3.0f, 1.0f,
481 
482  5.0f, 1.0f,
483  1.0f, 1.0f,
484  },
485  qScale, qOffset));
486 
487  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
488  if (dataLayout == armnn::DataLayout::NHWC)
489  {
490  std::vector<T> tmp(inputData.size());
491  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
492  inputData = tmp;
493 
494  std::vector<T> tmp1(outputData.size());
495  armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
496  outputData = tmp1;
497  }
498 
499  auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
500 
501  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
502 
503  return SimplePooling2dTestImpl<ArmnnType>(
504  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
505 }
506 
// L2 pooling with a 3x3 kernel and stride 1 over a 1x1x4x4 input.
// Expected values were calculated by hand for the repeating 2/1/5 input pattern.
507 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
508 LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
509  armnn::IWorkloadFactory& workloadFactory,
511  float qScale = 1.0f,
512  int32_t qOffset = 0)
513 {
514  armnn::Pooling2dDescriptor descriptor;
516  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
517  descriptor.m_StrideX = descriptor.m_StrideY = 1;
519 
520  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
521  auto input = MakeTensor<T, 4>(inputTensorInfo,
522  QuantizedVector<T>({
523  2.0f, 1.0f, 5.0f, 2.0f,
524  1.0f, 2.0f, 2.0f, 1.0f,
525  5.0f, 4.0f, 1.0f, 5.0f,
526  2.0f, 1.0f, 5.0f, 2.0f,
527  },
528  qScale, qOffset));
529 
530  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
531  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
532  QuantizedVector<T>({
533  3.0f, 3.0f,
534  3.0f, 3.0f,
535  },
536  qScale, qOffset));
537 
538  return SimplePooling2dTestImpl<ArmnnType>(
539  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
540 }
541 
// L2 pooling with a 3x3 kernel and stride 3 (non-overlapping windows) over a
// 1x1x9x9 input tiled from the same 3x3 pattern, so every window yields the
// same hand-computed result.
542 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
543 LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
544  armnn::IWorkloadFactory& workloadFactory,
546  float qScale = 1.0f,
547  int32_t qOffset = 0)
548 {
549  armnn::Pooling2dDescriptor descriptor;
551  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
552  descriptor.m_StrideX = descriptor.m_StrideY = 3;
554 
555  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
556  auto input = MakeTensor<T, 4>(inputTensorInfo,
557  QuantizedVector<T>({
558  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
559  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
560  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
561  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
562  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
563  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
564  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
565  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
566  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
567  },
568  qScale, qOffset));
569 
570  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
571  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
572  QuantizedVector<T>({
573  3.0f, 3.0f, 3.0f,
574  3.0f, 3.0f, 3.0f,
575  3.0f, 3.0f, 3.0f,
576  },
577  qScale, qOffset));
578 
579  return SimplePooling2dTestImpl<ArmnnType>(
580  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
581 }
582 
// L2 pooling with a 3x3 kernel and stride 4 over a 1x1x7x7 input; zero rows and
// columns separate the two 3x3 tiles so each stride-4 window sees exactly one
// copy of the hand-computed pattern.
583 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
584 LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
585  armnn::IWorkloadFactory& workloadFactory,
587  float qScale = 1.0f,
588  int32_t qOffset = 0)
589 {
590  armnn::Pooling2dDescriptor descriptor;
592  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
593  descriptor.m_StrideX = descriptor.m_StrideY = 4;
595 
596  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
597  auto input = MakeTensor<T, 4>(inputTensorInfo,
598  QuantizedVector<T>({
599  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
600  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
601  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
602  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
603  2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
604  1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
605  5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
606  },
607  qScale, qOffset));
608 
609  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
610  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
611  QuantizedVector<T>({
612  3.0f, 3.0f,
613  3.0f, 3.0f,
614  },
615  qScale, qOffset));
616 
617  return SimplePooling2dTestImpl<ArmnnType>(
618  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
619 }
620 
// L2 pooling where a single 7x7 kernel covers the whole 1x1x7x7 input,
// producing a 1x1x1x1 output with a hand-computed expected value.
621 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
622 LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
623  armnn::IWorkloadFactory& workloadFactory,
625  float qScale = 1.0f,
626  int32_t qOffset = 0)
627 {
628  armnn::Pooling2dDescriptor descriptor;
630  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
631  descriptor.m_StrideX = descriptor.m_StrideY = 7;
633 
634  armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
635  auto input = MakeTensor<T, 4>(inputTensorInfo,
636  QuantizedVector<T>({
637  1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
638  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
639  0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
640  8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
641  0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
642  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
643  0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
644  },
645  qScale, qOffset));
646 
647  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
648  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
649  QuantizedVector<T>({
650  3.0f,
651  },
652  qScale, qOffset));
653 
654  return SimplePooling2dTestImpl<ArmnnType>(
655  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
656 }
657 
// L2 pooling where a single 9x9 kernel covers the whole 1x1x9x9 input (the same
// tiled pattern as the Size3Stride3 test), producing a 1x1x1x1 output.
658 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
659 LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
660  armnn::IWorkloadFactory& workloadFactory,
662  float qScale = 1.0f,
663  int32_t qOffset = 0)
664 {
665  armnn::Pooling2dDescriptor descriptor;
667  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
668  descriptor.m_StrideX = descriptor.m_StrideY = 9;
670 
671  armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
672  auto input = MakeTensor<T, 4>(inputTensorInfo,
673  QuantizedVector<T>({
674  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
675  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
676  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
677  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
678  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
679  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
680  2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
681  1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
682  5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
683  },
684  qScale, qOffset));
685 
686  armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
687  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
688  QuantizedVector<T>({
689  3.0f,
690  },
691  qScale, qOffset));
692 
693  return SimplePooling2dTestImpl<ArmnnType>(
694  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
695 }
696 
// Exercises pooling with a non-square kernel (2x3), unequal strides (2,1) and
// asymmetric padding (left 2 / right 0, top 1 / bottom 2) on a tiny 1x1x1x3
// input, using Floor rounding and Exclude padding so padded elements do not
// contribute to the result.
697 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
698 LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
699  armnn::IWorkloadFactory& workloadFactory,
701  float qScale = 1.0f,
702  int32_t qOffset = 0)
703 {
704  armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
705  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
706 
707  armnn::Pooling2dDescriptor descriptor;
709  descriptor.m_PoolWidth = 2;
710  descriptor.m_PoolHeight = 3;
711  descriptor.m_StrideX = 2;
712  descriptor.m_StrideY = 1;
713  descriptor.m_PadLeft = 2;
714  descriptor.m_PadRight = 0;
715  descriptor.m_PadTop = 1;
716  descriptor.m_PadBottom = 2;
717  descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
718  descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
719 
720  // Construct input data.
721  auto input = MakeTensor<T, 4>(inputTensorInfo,
722  QuantizedVector<T>({
723  1.0f, 3.0f, 4.0f,
724  },
725  qScale, qOffset));
726 
727  // These were calculated manually.
728  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
729  QuantizedVector<T>({
730  0.0f, 3.0f, 0.0f, 3.0f,
731  },
732  qScale, qOffset));
733 
734  return SimplePooling2dTestImpl<ArmnnType>(
735  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
736 }
737 
// Cross-backend comparison test: runs the same pooling (type chosen by
// 'poolingType') on 'workloadFactory' and on 'refWorkloadFactory' over identical
// deterministic random input (fixed seed 81715), returning both outputs so the
// caller's framework can compare them. Returns early with supported == false if
// the backend under test rejects the configuration.
// NOTE(review): this listing elides the memoryManager parameter and the
// Pooling2dQueueDescriptor/WorkloadInfo declarations ('data'/'info') — verify
// against full source.
738 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
739 LayerTestResult<T, 4> ComparePooling2dTestCommon(
740  armnn::IWorkloadFactory& workloadFactory,
742  armnn::IWorkloadFactory& refWorkloadFactory,
743  armnn::PoolingAlgorithm poolingType,
744  float qScale = 1.0f,
745  int32_t qOffset = 0)
746 {
747  IgnoreUnused(memoryManager);
748  const unsigned int inputWidth = 16;
749  const unsigned int inputHeight = 32;
750  const unsigned int channelCount = 2;
751  const unsigned int batchSize = 5;
752 
753  const unsigned int poolSize = 3;
754  const unsigned int strideX = 2;
755  const unsigned int strideY = 4;
756  const unsigned int padX = 0;
757  const unsigned int padY = 0;
758 
// Standard pooling output-size formula (Floor rounding, set below).
759  const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
760  const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
761 
762  armnn::TensorInfo inputTensorInfo;
763  armnn::TensorInfo outputTensorInfo;
764 
765  unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
766  unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
767 
768  inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
769  outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
770 
771  // Set quantization parameters if the requested type is a quantized type.
772  if(armnn::IsQuantizedType<T>())
773  {
774  inputTensorInfo.SetQuantizationScale(qScale);
775  inputTensorInfo.SetQuantizationOffset(qOffset);
776  outputTensorInfo.SetQuantizationScale(qScale);
777  outputTensorInfo.SetQuantizationOffset(qOffset);
778  }
779 
// Fixed seed keeps the "random" input identical across runs and backends.
780  boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
781 
782  LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
783 
785  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
786  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
788 
791  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
792  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
793  data.m_Parameters.m_PoolType = poolingType;
794  data.m_Parameters.m_PoolWidth = poolSize;
795  data.m_Parameters.m_PoolHeight = poolSize;
796  data.m_Parameters.m_StrideX = strideX;
797  data.m_Parameters.m_StrideY = strideY;
798  data.m_Parameters.m_PadLeft = padX;
799  data.m_Parameters.m_PadRight = padX;
800  data.m_Parameters.m_PadTop = padY;
801  data.m_Parameters.m_PadBottom = padY;
802  data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
803 
805  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
806  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
808 
809  // Don't execute if Pooling is not supported, as an exception will be raised.
810  armnn::BackendId backend = workloadFactory.GetBackendId();
811  const size_t reasonIfUnsupportedMaxLen = 255;
812  char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
813  comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
814  data.m_Parameters,
815  reasonIfUnsupported, reasonIfUnsupportedMaxLen);
816  if (!comparisonResult.supported)
817  {
818  return comparisonResult;
819  }
820 
// The reference workload reuses the same parameters but its own tensor handles.
821  armnn::Pooling2dQueueDescriptor refData = data;
822  armnn::WorkloadInfo refInfo = info;
823  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
824  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
825 
826  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
827  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
828 
829  outputHandleRef->Allocate();
830  inputHandleRef->Allocate();
831  inputHandle->Allocate();
832  outputHandle->Allocate();
833 
834  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
835  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
836 
837  workload->Execute();
838  workloadRef->Execute();
839 
// 'output' holds the backend-under-test result; 'outputExpected' holds the
// reference backend's result, so the standard comparison machinery applies.
840  CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
841  CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
842 
843  return comparisonResult;
844 }
845 
846 //
847 // Tests max pooling with the following parameters:
848 //
849 // Pooling size: 2x2
850 // Stride: (2,2)
851 // input size: 4x4
852 // channels: 1
853 // batch size: 1
854 //
// With forceNoPadding == false, left/right padding of 3 widens the output and
// produces zero columns where the window covers only padding (pooling with
// zero-padded max). NOTE(review): the memoryManager parameter and m_PoolType /
// rounding assignments are elided in this listing — verify against full source.
855 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
856 LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
857  armnn::IWorkloadFactory& workloadFactory,
859  bool forceNoPadding,
860  float qScale = 1.0f,
861  int32_t qOffset = 0)
862 {
863  armnn::Pooling2dDescriptor descriptor;
865  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
866  descriptor.m_StrideX = 2;
867  descriptor.m_StrideY = 2;
868  descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
869  descriptor.m_PadTop = descriptor.m_PadBottom = 0;
872 
873 
874  unsigned int inputWidth = 4;
875 
876  unsigned int inputHeight = 4;
877 
878  unsigned int outputWidth =
879  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
880  descriptor.m_StrideX;
881  unsigned int outputHeight =
882  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
883  descriptor.m_StrideY;
884  unsigned int channels = 1;
885  unsigned int batchSize = 1;
886 
887  std::vector<float> inputData = {
888  510.0f, 222.0f, 780.0f, 654.0f,
889  141.0f, 276.0f, 15.0f, 546.0f,
890  303.0f, 618.0f, 582.0f, 339.0f,
891  438.0f, 564.0f, 573.0f, 402.0f
892  };
893 
894  // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
895  std::vector<float> expectedOutputDataWithPadding = {
896  0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
897  0.0f, 438.0f, 618.0f, 402.0f, 0.0f
898  };
899 
900  std::vector<float> expectedOutputDataNoPadding = {
901  510.0f, 780.0f,
902  618.0f, 582.0f
903  };
904 
905  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
906 
907  // Scale and offset should match input - we're just calculating maximum values.
908  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
909 
910  // Set quantization parameters if the requested type is a quantized type.
911  if(armnn::IsQuantizedType<T>())
912  {
913  inputTensorInfo.SetQuantizationScale(qScale);
914  inputTensorInfo.SetQuantizationOffset(qOffset);
915  outputTensorInfo.SetQuantizationScale(qScale);
916  outputTensorInfo.SetQuantizationOffset(qOffset);
917  }
918 
919  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
920 
921  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
922  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
923  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
924 
925  return SimplePooling2dTestImpl<ArmnnType>(
926  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
927 }
928 
929 //
930 // Tests average pooling with the following parameters:
931 //
932 // Pooling size: 3x2
933 // Stride: (2,2)
934 // input size: 3x2
935 // channels: 1
936 // batch size: 1
937 //
// Runs a 3x2 pooling window with stride (2,2) over a 3x2 single-channel input,
// optionally with 1 element of left/right padding (padded: 2x1 output; unpadded: 1x1).
// NOTE(review): original lines 941 (memoryManager parameter), 947 (presumably
// descriptor.m_PoolType = Average, per the function name) and 956-957 were lost in
// the docs extraction -- restore from the original file.
938 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
939 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
940  armnn::IWorkloadFactory& workloadFactory,
942  bool forceNoPadding,
943  float qScale = 1.0f,
944  int32_t qOffset = 0)
945 {
946  armnn::Pooling2dDescriptor descriptor;
948  descriptor.m_PoolWidth = 3;
949  descriptor.m_PoolHeight = 2;
950  descriptor.m_StrideX = 2;
951  descriptor.m_StrideY = 2;
952  descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
953  descriptor.m_PadRight = descriptor.m_PadLeft;
954  descriptor.m_PadTop = 0;
955  descriptor.m_PadBottom = 0;
958 
959  unsigned int inputWidth = 3;
960  unsigned int inputHeight = 2;
// Output extent = (in + padBefore + padAfter + stride - pool) / stride (integer floor).
961  unsigned int outputWidth =
962  (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
963  descriptor.m_StrideX;
964  unsigned int outputHeight =
965  (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
966  descriptor.m_StrideY;
967  unsigned int channels = 1;
968  unsigned int batchSize = 1;
969 
970  std::vector<float> inputData = {
971  3.0f, 6.0f, 9.0f,
972  12.0f, 15.0f, 18.0f,
973  };
974 
975  std::vector<float> expectedOutputDataWithPadding = {
976  6.0f, 8.0f,
977  };
978 
979  std::vector<float> expectedOutputDataNoPadding = {
980  10.5f,
981  };
982 
// Tensors are NCHW: { batch, channels, height, width }.
983  armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
984 
985  // Scale and offset should match input - we're just calculating average values.
986  armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
987 
988  // Set quantization parameters if the requested type is a quantized type.
989  if(armnn::IsQuantizedType<T>())
990  {
991  inputTensorInfo.SetQuantizationScale(qScale);
992  inputTensorInfo.SetQuantizationOffset(qOffset);
993  outputTensorInfo.SetQuantizationScale(qScale);
994  outputTensorInfo.SetQuantizationOffset(qOffset);
995  }
996 
997  auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
998 
999  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1000  forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1001  QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
1002 
1003  return SimplePooling2dTestImpl<ArmnnType>(
1004  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1005 }
1006 
1007 
// 2x2 pooling window, stride (2,2), 1-element padding on all four sides of a 4x4
// input (NCHW { 1, 1, 4, 4 }), producing a 3x3 output.
// NOTE(review): original lines 1011 (memoryManager parameter), 1016 (presumably
// m_PoolType = Max) and 1023 (presumably m_PaddingMethod) were lost in the docs
// extraction -- restore from the original file.
1008 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1009 LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1010  armnn::IWorkloadFactory& workloadFactory,
1012  float qScale = 1.0f,
1013  int32_t qOffset = 0)
1014 {
1015  armnn::Pooling2dDescriptor descriptor;
1017  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1018  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1019  descriptor.m_PadLeft = 1;
1020  descriptor.m_PadRight = 1;
1021  descriptor.m_PadTop = 1;
1022  descriptor.m_PadBottom = 1;
1024 
1025  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1026  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1027 
1028  // Set quantization parameters if the requested type is a quantized type.
1029  if(armnn::IsQuantizedType<T>())
1030  {
1031  inputTensorInfo.SetQuantizationScale(qScale);
1032  inputTensorInfo.SetQuantizationOffset(qOffset);
1033  outputTensorInfo.SetQuantizationScale(qScale);
1034  outputTensorInfo.SetQuantizationOffset(qOffset);
1035  }
1036 
1037  auto input = MakeTensor<T, 4>(inputTensorInfo,
1038  QuantizedVector<T>({
1039  -1.0f, -2.0f, 3.0f, 4.0f,
1040  -1.0f, -2.0f, 3.0f, 4.0f,
1041  1.0f, 2.0f, -3.0f, -4.0f,
1042  1.0f, 2.0f, -3.0f, -4.0f,
1043  },
1044  qScale, qOffset));
1045 
// Hand-computed reference output for the configuration above.
1046  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1047  QuantizedVector<T>({
1048  -1.0f, 3.0f, 4.0f,
1049  1.0f, 3.0f, 4.0f,
1050  1.0f, 2.0f, -4.0f,
1051  },
1052  qScale, qOffset));
1053 
1054  return SimplePooling2dTestImpl<ArmnnType>(
1055  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1056 }
1057 
// 3x3 pooling window, stride (1,1), 1-element padding on all sides of a 4x4 input,
// producing a same-size 4x4 output.
// NOTE(review): original lines 1061 (memoryManager parameter), 1066 (presumably
// m_PoolType = Max) and 1073 were lost in the docs extraction -- restore from the
// original file.
1058 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1059 LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1060  armnn::IWorkloadFactory& workloadFactory,
1062  float qScale = 1.0f,
1063  int32_t qOffset = 0)
1064 {
1065  armnn::Pooling2dDescriptor descriptor;
1067  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1068  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1069  descriptor.m_PadLeft = 1;
1070  descriptor.m_PadRight = 1;
1071  descriptor.m_PadTop = 1;
1072  descriptor.m_PadBottom = 1;
1074 
1075  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1076  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1077 
1078  // Set quantization parameters if the requested type is a quantized type.
1079  if(armnn::IsQuantizedType<T>())
1080  {
1081  inputTensorInfo.SetQuantizationScale(qScale);
1082  inputTensorInfo.SetQuantizationOffset(qOffset);
1083  outputTensorInfo.SetQuantizationScale(qScale);
1084  outputTensorInfo.SetQuantizationOffset(qOffset);
1085  }
1086 
1087  auto input = MakeTensor<T, 4>(inputTensorInfo,
1088  QuantizedVector<T>({
1089  -1.0f, -2.0f, 3.0f, 4.0f,
1090  -1.0f, -2.0f, 3.0f, 4.0f,
1091  1.0f, 2.0f, -3.0f, -4.0f,
1092  1.0f, 2.0f, -3.0f, -4.0f,
1093  },
1094  qScale, qOffset));
1095 
// Hand-computed reference output for the configuration above.
1096  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1097  QuantizedVector<T>({
1098  -1.0f, 3.0f, 4.0f, 4.0f,
1099  2.0f, 3.0f, 4.0f, 4.0f,
1100  2.0f, 3.0f, 4.0f, 4.0f,
1101  2.0f, 2.0f, 2.0f, -3.0f,
1102  },
1103  qScale, qOffset));
1104 
1105  return SimplePooling2dTestImpl<ArmnnType>(
1106  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1107 }
1108 
// 2x2 pooling window, stride (2,2), 1-element padding on all sides of a 4x4 input,
// producing a 3x3 output.
// NOTE(review): original lines 1112 (memoryManager parameter), 1117 (presumably
// m_PoolType = Average) and 1124 were lost in the docs extraction -- restore from
// the original file.
1110 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1111 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1112  armnn::IWorkloadFactory& workloadFactory,
1114  float qScale = 1.0f,
1115  int32_t qOffset = 0)
1116 {
1117  armnn::Pooling2dDescriptor descriptor;
1119  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1120  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1121  descriptor.m_PadLeft = 1;
1122  descriptor.m_PadRight = 1;
1123  descriptor.m_PadTop = 1;
1124  descriptor.m_PadBottom = 1;
1126 
1127  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1128  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1129 
1130  // Set quantization parameters if the requested type is a quantized type.
1131  if(armnn::IsQuantizedType<T>())
1132  {
1133  inputTensorInfo.SetQuantizationScale(qScale);
1134  inputTensorInfo.SetQuantizationOffset(qOffset);
1135  outputTensorInfo.SetQuantizationScale(qScale);
1136  outputTensorInfo.SetQuantizationOffset(qOffset);
1137  }
1138 
1139  auto input = MakeTensor<T, 4>(inputTensorInfo,
1140  QuantizedVector<T>({
1141  12.0f, 20.0f, 32.0f, 40.0f,
1142  12.0f, 20.0f, 32.0f, 40.0f,
1143  12.0f, 20.0f, 32.0f, 40.0f,
1144  12.0f, 20.0f, 32.0f, 40.0f,
1145  },
1146  qScale, qOffset));
1147 
// Hand-computed reference output for the configuration above.
1148  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1149  QuantizedVector<T>({
1150  3.0f, 13.0f, 10.0f,
1151  6.0f, 26.0f, 20.0f,
1152  3.0f, 13.0f, 10.0f,
1153  },
1154  qScale, qOffset));
1155 
1156  return SimplePooling2dTestImpl<ArmnnType>(
1157  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1158 }
1158 
// 3x3 pooling window, stride (2,2), explicitly zero padding on all sides of a 4x4
// input, producing a 2x2 output.
// NOTE(review): original lines 1162 (memoryManager parameter), 1167 (presumably
// m_PoolType = Average) and 1174-1175 were lost in the docs extraction -- restore
// from the original file.
1160 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1161 LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1162  armnn::IWorkloadFactory& workloadFactory,
1164  float qScale = 1.0f,
1165  int32_t qOffset = 0)
1166 {
1167  armnn::Pooling2dDescriptor descriptor;
1169  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1170  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1171  descriptor.m_PadLeft = 0;
1172  descriptor.m_PadRight = 0;
1173  descriptor.m_PadTop = 0;
1174  descriptor.m_PadBottom = 0;
1176 
1177  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1178  armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
1179 
1180  // Set quantization parameters if the requested type is a quantized type.
1181  if(armnn::IsQuantizedType<T>())
1182  {
1183  inputTensorInfo.SetQuantizationScale(qScale);
1184  inputTensorInfo.SetQuantizationOffset(qOffset);
1185  outputTensorInfo.SetQuantizationScale(qScale);
1186  outputTensorInfo.SetQuantizationOffset(qOffset);
1187  }
1188 
1189  auto input = MakeTensor<T, 4>(inputTensorInfo,
1190  QuantizedVector<T>({
1191  1.0f, 2.0f, 3.0f, 4.0f,
1192  1.0f, 2.0f, 3.0f, 4.0f,
1193  1.0f, 2.0f, 3.0f, 4.0f,
1194  1.0f, 2.0f, 3.0f, 4.0f,
1195  },
1196  qScale, qOffset));
1197 
// Hand-computed reference output for the configuration above.
1198  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1199  QuantizedVector<T>({
1200  2.0f, 3.5f,
1201  2.0f, 3.5f
1202  },
1203  qScale, qOffset));
1204 
1205  return SimplePooling2dTestImpl<ArmnnType>(
1206  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1207 }
1208 
// 3x3 pooling window, stride (1,1), 1-element padding on all sides of a 4x4 input,
// producing a same-size 4x4 output.
// NOTE(review): original lines 1212 (memoryManager parameter), 1217 (presumably
// m_PoolType = Average) and 1224 were lost in the docs extraction -- restore from
// the original file.
1210 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1211 LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1212  armnn::IWorkloadFactory& workloadFactory,
1214  float qScale = 1.0f,
1215  int32_t qOffset = 0)
1216 {
1217  armnn::Pooling2dDescriptor descriptor;
1219  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1220  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1221  descriptor.m_PadLeft = 1;
1222  descriptor.m_PadRight = 1;
1223  descriptor.m_PadTop = 1;
1224  descriptor.m_PadBottom = 1;
1226 
1227  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1228  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1229 
1230  // Set quantization parameters if the requested type is a quantized type.
1231  if(armnn::IsQuantizedType<T>())
1232  {
1233  inputTensorInfo.SetQuantizationScale(qScale);
1234  inputTensorInfo.SetQuantizationOffset(qOffset);
1235  outputTensorInfo.SetQuantizationScale(qScale);
1236  outputTensorInfo.SetQuantizationOffset(qOffset);
1237  }
1238 
1239  auto input = MakeTensor<T, 4>(inputTensorInfo,
1240  QuantizedVector<T>({
1241  9.0f, 27.0f, 18.0f, 36.0f,
1242  18.0f, 9.0f, 18.0f, 9.0f,
1243  27.0f, 18.0f, 9.0f, 27.0f,
1244  9.0f, 27.0f, 9.0f, 18.0f,
1245  },
1246  qScale, qOffset));
1247 
// Hand-computed reference output for the configuration above.
1248  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1249  QuantizedVector<T>({
1250  7.0f, 11.0f, 13.0f, 9.0f,
1251  12.0f, 17.0f, 19.0f, 13.0f,
1252  12.0f, 16.0f, 16.0f, 10.0f,
1253  9.0f, 11.0f, 12.0f, 7.0f,
1254  },
1255  qScale, qOffset));
1256 
1257  return SimplePooling2dTestImpl<ArmnnType>(
1258  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1259 }
1259 
// 2x2 pooling window, stride (2,2), 1-element padding on all sides of a 4x4 input,
// producing a 3x3 output.
// NOTE(review): original lines 1263 (memoryManager parameter), 1268 (presumably
// m_PoolType = L2, per the function name) and 1275 were lost in the docs
// extraction -- restore from the original file.
1261 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1262 LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1263  armnn::IWorkloadFactory& workloadFactory,
1265  float qScale = 1.0f,
1266  int32_t qOffset = 0)
1267 {
1268  armnn::Pooling2dDescriptor descriptor;
1270  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1271  descriptor.m_StrideX = descriptor.m_StrideY = 2;
1272  descriptor.m_PadLeft = 1;
1273  descriptor.m_PadRight = 1;
1274  descriptor.m_PadTop = 1;
1275  descriptor.m_PadBottom = 1;
1277 
1278  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1279  armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
1280 
1281  // Set quantization parameters if the requested type is a quantized type.
1282  if(armnn::IsQuantizedType<T>())
1283  {
1284  inputTensorInfo.SetQuantizationScale(qScale);
1285  inputTensorInfo.SetQuantizationOffset(qOffset);
1286  outputTensorInfo.SetQuantizationScale(qScale);
1287  outputTensorInfo.SetQuantizationOffset(qOffset);
1288  }
1289 
1290  auto input = MakeTensor<T, 4>(inputTensorInfo,
1291  QuantizedVector<T>({
1292  2.0f, 4.0f, 8.0f, 16.0f,
1293  4.0f, 2.0f, 2.0f, 4.0f,
1294  8.0f, 2.0f, 4.0f, 2.0f,
1295  16.0f, 2.0f, 2.0f, 8.0f,
1296  },
1297  qScale, qOffset));
1298 
// Hand-computed reference output (values rounded to ~4 decimal places).
1299  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1300  QuantizedVector<T>({
1301  1.0f, 4.4721f, 8.0f,
1302  4.4721f, 2.6457f, 2.236f,
1303  8.0f, 1.4142f, 4.0f,
1304  },
1305  qScale, qOffset));
1306 
1307  return SimplePooling2dTestImpl<ArmnnType>(
1308  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1309 }
1309 
// 3x3 pooling window, stride (1,1), 1-element padding on all sides of a 4x4 input,
// producing a same-size 4x4 output.
// NOTE(review): original lines 1313 (memoryManager parameter), 1318 (presumably
// m_PoolType = L2, per the function name) and 1325 were lost in the docs
// extraction -- restore from the original file.
1311 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1312 LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1313  armnn::IWorkloadFactory& workloadFactory,
1315  float qScale = 1.0f,
1316  int32_t qOffset = 0)
1317 {
1318  armnn::Pooling2dDescriptor descriptor;
1320  descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1321  descriptor.m_StrideX = descriptor.m_StrideY = 1;
1322  descriptor.m_PadLeft = 1;
1323  descriptor.m_PadRight = 1;
1324  descriptor.m_PadTop = 1;
1325  descriptor.m_PadBottom = 1;
1327 
1328  armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1329  armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1330 
1331  // Set quantization parameters if the requested type is a quantized type.
1332  if(armnn::IsQuantizedType<T>())
1333  {
1334  inputTensorInfo.SetQuantizationScale(qScale);
1335  inputTensorInfo.SetQuantizationOffset(qOffset);
1336  outputTensorInfo.SetQuantizationScale(qScale);
1337  outputTensorInfo.SetQuantizationOffset(qOffset);
1338  }
1339 
1340  auto input = MakeTensor<T, 4>(inputTensorInfo,
1341  QuantizedVector<T>({
1342  1.0f, 2.0f, 3.0f, 4.0f,
1343  1.0f, 2.0f, 3.0f, 4.0f,
1344  1.0f, 2.0f, 3.0f, 4.0f,
1345  1.0f, 2.0f, 3.0f, 4.0f,
1346  },
1347  qScale, qOffset));
1348 
// Hand-computed reference output (values rounded to ~4 decimal places).
1349  auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1350  QuantizedVector<T>({
1351  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1352  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1353  1.2909f, 2.1602f, 3.1091f, 2.8867f,
1354  1.0540f, 1.7638f, 2.5385f, 2.3570f,
1355  },
1356  qScale, qOffset));
1357 
1358  return SimplePooling2dTestImpl<ArmnnType>(
1359  workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
1360 }
1360 
1361 } // anonymous namespace
1362 
1364  armnn::IWorkloadFactory& workloadFactory,
1366  bool forceNoPadding)
1367 {
1368  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1369  workloadFactory, memoryManager, forceNoPadding);
1370 }
1371 
1373  armnn::IWorkloadFactory& workloadFactory,
1375  bool forceNoPadding)
1376 {
1377  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
1378  workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1379 }
1380 
1382  armnn::IWorkloadFactory& workloadFactory,
1384  bool forceNoPadding)
1385 {
1386  return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
1387  workloadFactory, memoryManager, forceNoPadding);
1388 }
1389 
1391  armnn::IWorkloadFactory& workloadFactory,
1393  bool forceNoPadding)
1394 {
1395  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1396  workloadFactory, memoryManager, forceNoPadding);
1397 }
1398 
1400  armnn::IWorkloadFactory& workloadFactory,
1402  bool forceNoPadding)
1403 {
1404  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
1405  workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1406 }
1407 
1409  armnn::IWorkloadFactory& workloadFactory,
1411  bool forceNoPadding)
1412 {
1413  return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
1414  workloadFactory, memoryManager, forceNoPadding);
1415 }
1416 
1418  armnn::IWorkloadFactory& workloadFactory,
1420  const armnn::DataLayout dataLayout)
1421 {
1422  return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1423 }
1424 
1426  armnn::IWorkloadFactory& workloadFactory,
1428  const armnn::DataLayout dataLayout)
1429 {
1430  return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1431 }
1432 
1434  armnn::IWorkloadFactory& workloadFactory,
1436  const armnn::DataLayout dataLayout)
1437 {
1438  return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1439 }
1441  armnn::IWorkloadFactory& workloadFactory,
1443 {
1444  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1445 }
1446 
1448  armnn::IWorkloadFactory& workloadFactory,
1450 {
1451  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1452  workloadFactory, memoryManager, 1.0f, -5);
1453 }
1454 
1456  armnn::IWorkloadFactory& workloadFactory,
1458 {
1459  return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1460  workloadFactory, memoryManager);
1461 }
1462 
1464  armnn::IWorkloadFactory& workloadFactory,
1466 {
1467  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1468 }
1469 
1471  armnn::IWorkloadFactory& workloadFactory,
1473 {
1474  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1475  workloadFactory, memoryManager, 1.0f, -5);
1476 }
1477 
1479  armnn::IWorkloadFactory& workloadFactory,
1481 {
1482  return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1483  workloadFactory, memoryManager);
1484 }
1485 
1487  armnn::IWorkloadFactory& workloadFactory,
1489  const armnn::DataLayout dataLayout)
1490 {
1491  return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1492 }
1493 
1495  armnn::IWorkloadFactory& workloadFactory,
1497  const armnn::DataLayout dataLayout)
1498 {
1499  return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1500  workloadFactory, memoryManager, dataLayout, 0.5, -1);
1501 }
1502 
1504  armnn::IWorkloadFactory& workloadFactory,
1506  const armnn::DataLayout dataLayout)
1507 {
1508  return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1509  workloadFactory, memoryManager, dataLayout);
1510 }
1511 
1513  armnn::IWorkloadFactory& workloadFactory,
1515  bool forceNoPadding)
1516 {
1517  return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1518  workloadFactory, memoryManager, forceNoPadding);
1519 }
1520 
1522  armnn::IWorkloadFactory& workloadFactory,
1524 {
1525  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1526 }
1527 
1529  armnn::IWorkloadFactory& workloadFactory,
1531 {
1532  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1533  workloadFactory, memoryManager, 0.5, -1);
1534 }
1535 
1537  armnn::IWorkloadFactory& workloadFactory,
1539 {
1540  return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1541  workloadFactory, memoryManager);
1542 }
1544  armnn::IWorkloadFactory& workloadFactory,
1546 {
1547  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1548 }
1549 
1551  armnn::IWorkloadFactory& workloadFactory,
1553 {
1554  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1555  workloadFactory, memoryManager);
1556 }
1557 
1559  armnn::IWorkloadFactory& workloadFactory,
1561 {
1562  return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
1563  workloadFactory, memoryManager);
1564 }
1565 
1567  armnn::IWorkloadFactory& workloadFactory,
1569 {
1570  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1571  workloadFactory, memoryManager);
1572 }
1573 
1575  armnn::IWorkloadFactory& workloadFactory,
1577 {
1578  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
1579  workloadFactory, memoryManager);
1580 }
1581 
1583  armnn::IWorkloadFactory& workloadFactory,
1585 {
1586  return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
1587  workloadFactory, memoryManager);
1588 }
1589 
1591  armnn::IWorkloadFactory& workloadFactory,
1593 {
1594  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1595 }
1596 
1598  armnn::IWorkloadFactory& workloadFactory,
1600 {
1601  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1602  workloadFactory, memoryManager);
1603 }
1604 
1606  armnn::IWorkloadFactory& workloadFactory,
1608 {
1609  return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1610  workloadFactory, memoryManager);
1611 }
1612 
1614  armnn::IWorkloadFactory& workloadFactory,
1616  const armnn::DataLayout dataLayout)
1617 {
1618  return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1619 }
1620 
1622  armnn::IWorkloadFactory& workloadFactory,
1624  const armnn::DataLayout dataLayout)
1625 {
1626  return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
1627 }
1628 
1630  armnn::IWorkloadFactory& workloadFactory,
1632  const armnn::DataLayout dataLayout)
1633 {
1634  return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
1635 }
1636 
1638  armnn::IWorkloadFactory& workloadFactory,
1640 {
1641  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1642 }
1643 
1645  armnn::IWorkloadFactory& workloadFactory,
1647 {
1648  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1649 }
1650 
1652  armnn::IWorkloadFactory& workloadFactory,
1654 {
1655  return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1656 }
1657 
1659  armnn::IWorkloadFactory& workloadFactory,
1661 {
1662  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1663 }
1664 
1666  armnn::IWorkloadFactory& workloadFactory,
1668 {
1669  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1670 }
1671 
1673  armnn::IWorkloadFactory& workloadFactory,
1675 {
1676  return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1677 }
1679  armnn::IWorkloadFactory& workloadFactory,
1681 {
1682  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1683 }
1684 
1686  armnn::IWorkloadFactory& workloadFactory,
1688 {
1689  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1690 }
1691 
1693  armnn::IWorkloadFactory& workloadFactory,
1695 {
1696  return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1697 }
1698 
1700  armnn::IWorkloadFactory& workloadFactory,
1702 {
1703  return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1704 }
1705 
1707  armnn::IWorkloadFactory& workloadFactory,
1709 {
1710  return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1711 }
1712 
1714  armnn::IWorkloadFactory& workloadFactory,
1716 {
1717  return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1718 }
1719 
1721  armnn::IWorkloadFactory& workloadFactory,
1723 {
1724  return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1725 }
1726 
1728  armnn::IWorkloadFactory& workloadFactory,
1730 {
1731  return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1732 }
1733 
1735  armnn::IWorkloadFactory& workloadFactory,
1737 {
1738  return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1739 }
1741  armnn::IWorkloadFactory& workloadFactory,
1743 {
1744  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1745 }
1746 
1748  armnn::IWorkloadFactory& workloadFactory,
1750 {
1751  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1752 }
1753 
1755  armnn::IWorkloadFactory& workloadFactory,
1757 {
1758  return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1759 }
1760 
1762  armnn::IWorkloadFactory& workloadFactory,
1764 {
1765  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1766 }
1767 
1769  armnn::IWorkloadFactory& workloadFactory,
1771 {
1772  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1773 }
1774 
1776  armnn::IWorkloadFactory& workloadFactory,
1778 {
1779  return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1780 }
1781 
1783  armnn::IWorkloadFactory& workloadFactory,
1785 {
1786  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1787 }
1788 
1790  armnn::IWorkloadFactory& workloadFactory,
1792 {
1793  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
1794 }
1795 
1797  armnn::IWorkloadFactory& workloadFactory,
1799 {
1800  return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
1801 }
1802 
1804  armnn::IWorkloadFactory& workloadFactory,
1806  armnn::IWorkloadFactory& refWorkloadFactory,
1807  armnn::PoolingAlgorithm poolingType)
1808 {
1809  return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1810  workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1811 }
1812 
1814  armnn::IWorkloadFactory& workloadFactory,
1816  armnn::IWorkloadFactory& refWorkloadFactory,
1817  armnn::PoolingAlgorithm poolingType)
1818 {
1819  return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1820  workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1821 }
1822 
1824  armnn::IWorkloadFactory& workloadFactory,
1826  armnn::IWorkloadFactory& refWorkloadFactory,
1827  armnn::PoolingAlgorithm poolingType)
1828 {
1829  return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
1830  workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1831 }
LayerTestResult< float, 4 > L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all dimension sizes which are specified...
Definition: Tensor.cpp:182
LayerTestResult< int16_t, 4 > IgnorePaddingL2Pooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual const BackendId & GetBackendId() const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadBottom
Padding bottom value in the height dimension.
DataLayout
Definition: Types.hpp:49
unsigned int GetWidthIndex() const
LayerTestResult< float, 4 > ComparePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
bool IsPooling2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Pooling2dDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
const TensorShape & GetShape() const
Definition: Tensor.hpp:187
uint32_t m_PadLeft
Padding left value in the width dimension.
LayerTestResult< float, 4 > IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize7Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
LayerTestResult< float, 4 > SimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > ComparePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
LayerTestResult< uint8_t, 4 > IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PoolWidth
Pooling width value.
LayerTestResult< float, 4 > LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > SimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingAveragePooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > ComparePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, armnn::PoolingAlgorithm poolingType)
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PadTop
Padding top value in the height dimension.
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
void IgnoreUnused(Ts &&...)
PoolingAlgorithm
Definition: Types.hpp:96
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
LayerTestResult< float, 4 > L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetHeightIndex() const
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > IgnorePaddingMaxPooling2dSize3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
uint32_t m_PoolHeight
Pooling height value.
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride3Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
uint32_t m_PadRight
Padding right value in the width dimension.
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
LayerTestResult< float, 4 > SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< uint8_t, 4 > IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Pooling2dSize9Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:465
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
Definition: NumericCast.hpp:33
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > AsymmetricNonSquarePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride1Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 4 > IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
The padding fields count, but are ignored.
LayerTestResult< int16_t, 4 > L2Pooling2dSize3Stride4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > L2Pooling2dSize7Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
OutputShapeRounding m_OutputShapeRounding
The rounding method for the output shape. (Floor, Ceiling).
LayerTestResult< int16_t, 4 > SimpleMaxPooling2dSize3x3Stride2x4Int16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout, const armnn::DataType dataType)
Definition: TensorUtils.cpp:38
LayerTestResult< float, 4 > SimpleMaxPooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
LayerTestResult< float, 4 > AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:481
LayerTestResult< float, 4 > L2Pooling2dSize9Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > SimpleL2Pooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout dataLayout)
A Pooling2dDescriptor for the Pooling2dLayer.
LayerTestResult< int16_t, 4 > LargeTensorsAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
unsigned int GetChannelsIndex() const
LayerTestResult< int16_t, 4 > IgnorePaddingSimpleAveragePooling2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 4 > SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, bool forceNoPadding)
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)