ArmNN
 21.05
PadTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "PadTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
14 
15 //
16 // Implementation templates
17 //
18 
19 template<armnn::DataType ArmnnType, typename T>
21  armnn::IWorkloadFactory& workloadFactory,
23  const armnn::ITensorHandleFactory& tensorHandleFactory,
24  float qScale,
25  int32_t qOffset,
26  const float customPaddingValue)
27 {
28  IgnoreUnused(memoryManager);
29  const armnn::TensorShape inputShape{ 3, 3 };
30  const armnn::TensorShape outputShape{ 7, 7 };
31 
32  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
33  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
34 
35  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
36  {
37  // Height (3) x Width (3)
38  4, 8, 6,
39  7, 4, 4,
40  3, 2, 4
41  },
42  qScale, qOffset);
43 
44  auto p = customPaddingValue;
45  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
46  {
47  p, p, p, p, p, p, p,
48  p, p, p, p, p, p, p,
49  p, p, 4, 8, 6, p, p,
50  p, p, 7, 4, 4, p, p,
51  p, p, 3, 2, 4, p, p,
52  p, p, p, p, p, p, p,
53  p, p, p, p, p, p, p
54  },
55  qScale, qOffset);
56 
57  auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
58 
59  LayerTestResult<T, 2> result(outputTensorInfo);
60  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
61 
62  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
63  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
64 
65 
66  armnn::PadQueueDescriptor descriptor;
67 
68  std::vector<std::pair<unsigned int, unsigned int>> padList;
69  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
70  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
71 
72  descriptor.m_Parameters.m_PadList = padList;
73  descriptor.m_Parameters.m_PadValue = customPaddingValue;
75 
76  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
77  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
78 
79  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
80 
81  inputHandle->Allocate();
82  outputHandle->Allocate();
83 
84  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
85 
86  workload->PostAllocationConfigure();
87  workload->Execute();
88 
89  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
90 
91  return result;
92 }
93 
94 template<armnn::DataType ArmnnType, typename T>
96  armnn::IWorkloadFactory& workloadFactory,
98  const armnn::ITensorHandleFactory& tensorHandleFactory,
99  float qScale,
100  int32_t qOffset)
101 {
102  IgnoreUnused(memoryManager);
103  const armnn::TensorShape inputShape{ 2, 2, 2 };
104  const armnn::TensorShape outputShape{ 3, 5, 6 };
105 
106  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
107  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
108 
109  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
110  {
111  // Channel 0, Height (2) x Width (2)
112  0, 4,
113  2, 5,
114 
115  // Channel 1, Height (2) x Width (2)
116  6, 1,
117  5, 2
118  },
119  qScale, qOffset);
120 
121  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
122  {
123  0, 0, 0, 0, 0, 0,
124  0, 0, 0, 0, 0, 0,
125  0, 0, 0, 4, 0, 0,
126  0, 0, 2, 5, 0, 0,
127  0, 0, 0, 0, 0, 0,
128 
129  0, 0, 0, 0, 0, 0,
130  0, 0, 0, 0, 0, 0,
131  0, 0, 6, 1, 0, 0,
132  0, 0, 5, 2, 0, 0,
133  0, 0, 0, 0, 0, 0,
134 
135  0, 0, 0, 0, 0, 0,
136  0, 0, 0, 0, 0, 0,
137  0, 0, 0, 0, 0, 0,
138  0, 0, 0, 0, 0, 0,
139  0, 0, 0, 0, 0, 0
140  },
141  qScale, qOffset);
142 
143  auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
144 
145  LayerTestResult<T, 3> result(outputTensorInfo);
146  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
147 
148  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
149  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
150 
151 
152  armnn::PadQueueDescriptor descriptor;
153 
154  std::vector<std::pair<unsigned int, unsigned int>> PadList;
155  PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
156  PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
157  PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
158 
159  descriptor.m_Parameters.m_PadList = PadList;
160  armnn::WorkloadInfo info;
161 
162  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
163  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
164 
165  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
166 
167  inputHandle->Allocate();
168  outputHandle->Allocate();
169 
170  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
171 
172  workload->PostAllocationConfigure();
173  workload->Execute();
174 
175  CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
176 
177  return result;
178 }
179 
180 template<armnn::DataType ArmnnType, typename T>
182  armnn::IWorkloadFactory& workloadFactory,
184  const armnn::ITensorHandleFactory& tensorHandleFactory,
185  float qScale,
186  int32_t qOffset)
187 {
188  IgnoreUnused(memoryManager);
189  const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
190  const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
191 
192  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
193  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
194 
195  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
196  {
197  // Batch 0, Channel 0, Height (3) x Width (2)
198  0, 1,
199  2, 3,
200  4, 5,
201 
202  // Batch 0, Channel 1, Height (3) x Width (2)
203  6, 7,
204  8, 9,
205  10, 11,
206 
207  // Batch 1, Channel 0, Height (3) x Width (2)
208  12, 13,
209  14, 15,
210  16, 17,
211 
212  // Batch 1, Channel 1, Height (3) x Width (2)
213  18, 19,
214  20, 21,
215  22, 23
216  },
217  qScale, qOffset);
218 
219  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
220  {
221  0, 0, 0, 0,
222  0, 0, 0, 0,
223  0, 0, 0, 0,
224  0, 0, 0, 0,
225  0, 0, 0, 0,
226  0, 0, 0, 0,
227  0, 0, 0, 0,
228 
229  0, 0, 0, 0,
230  0, 0, 0, 0,
231  0, 0, 0, 0,
232  0, 0, 0, 0,
233  0, 0, 0, 0,
234  0, 0, 0, 0,
235  0, 0, 0, 0,
236 
237  0, 0, 0, 0,
238  0, 0, 0, 0,
239  0, 0, 0, 0,
240  0, 0, 0, 0,
241  0, 0, 0, 0,
242  0, 0, 0, 0,
243  0, 0, 0, 0,
244 
245  0, 0, 0, 0,
246  0, 0, 0, 0,
247  0, 0, 0, 0,
248  0, 0, 0, 0,
249  0, 0, 0, 0,
250  0, 0, 0, 0,
251  0, 0, 0, 0,
252 
253  0, 0, 0, 0,
254  0, 0, 0, 0,
255  0, 0, 0, 0,
256  0, 0, 0, 0,
257  0, 0, 0, 0,
258  0, 0, 0, 0,
259  0, 0, 0, 0,
260 
261  0, 0, 0, 0,
262  0, 0, 0, 0,
263  0, 0, 0, 0,
264  0, 0, 0, 0,
265  0, 0, 0, 0,
266  0, 0, 0, 0,
267  0, 0, 0, 0,
268 
269  0, 0, 0, 0,
270  0, 0, 0, 0,
271  0, 0, 0, 0,
272  0, 0, 0, 0,
273  0, 0, 0, 0,
274  0, 0, 0, 0,
275  0, 0, 0, 0,
276 
277  0, 0, 0, 0,
278  0, 0, 0, 0,
279  0, 0, 0, 0,
280  0, 0, 1, 0,
281  0, 2, 3, 0,
282  0, 4, 5, 0,
283  0, 0, 0, 0,
284 
285  0, 0, 0, 0,
286  0, 0, 0, 0,
287  0, 0, 0, 0,
288  0, 6, 7, 0,
289  0, 8, 9, 0,
290  0, 10, 11, 0,
291  0, 0, 0, 0,
292 
293  0, 0, 0, 0,
294  0, 0, 0, 0,
295  0, 0, 0, 0,
296  0, 0, 0, 0,
297  0, 0, 0, 0,
298  0, 0, 0, 0,
299  0, 0, 0, 0,
300 
301  0, 0, 0, 0,
302  0, 0, 0, 0,
303  0, 0, 0, 0,
304  0, 0, 0, 0,
305  0, 0, 0, 0,
306  0, 0, 0, 0,
307  0, 0, 0, 0,
308 
309  0, 0, 0, 0,
310  0, 0, 0, 0,
311  0, 0, 0, 0,
312  0, 0, 0, 0,
313  0, 0, 0, 0,
314  0, 0, 0, 0,
315  0, 0, 0, 0,
316 
317  0, 0, 0, 0,
318  0, 0, 0, 0,
319  0, 0, 0, 0,
320  0, 12, 13, 0,
321  0, 14, 15, 0,
322  0, 16, 17, 0,
323  0, 0, 0, 0,
324 
325  0, 0, 0, 0,
326  0, 0, 0, 0,
327  0, 0, 0, 0,
328  0, 18, 19, 0,
329  0, 20, 21, 0,
330  0, 22, 23, 0,
331  0, 0, 0, 0,
332 
333  0, 0, 0, 0,
334  0, 0, 0, 0,
335  0, 0, 0, 0,
336  0, 0, 0, 0,
337  0, 0, 0, 0,
338  0, 0, 0, 0,
339  0, 0, 0, 0,
340 
341  0, 0, 0, 0,
342  0, 0, 0, 0,
343  0, 0, 0, 0,
344  0, 0, 0, 0,
345  0, 0, 0, 0,
346  0, 0, 0, 0,
347  0, 0, 0, 0,
348 
349  0, 0, 0, 0,
350  0, 0, 0, 0,
351  0, 0, 0, 0,
352  0, 0, 0, 0,
353  0, 0, 0, 0,
354  0, 0, 0, 0,
355  0, 0, 0, 0,
356 
357  0, 0, 0, 0,
358  0, 0, 0, 0,
359  0, 0, 0, 0,
360  0, 0, 0, 0,
361  0, 0, 0, 0,
362  0, 0, 0, 0,
363  0, 0, 0, 0,
364 
365  0, 0, 0, 0,
366  0, 0, 0, 0,
367  0, 0, 0, 0,
368  0, 0, 0, 0,
369  0, 0, 0, 0,
370  0, 0, 0, 0,
371  0, 0, 0, 0,
372 
373  0, 0, 0, 0,
374  0, 0, 0, 0,
375  0, 0, 0, 0,
376  0, 0, 0, 0,
377  0, 0, 0, 0,
378  0, 0, 0, 0,
379  0, 0, 0, 0
380  },
381  qScale, qOffset);
382 
383  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
384 
385  LayerTestResult<T, 4> result(outputTensorInfo);
386  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
387 
388  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
389  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
390 
391  armnn::PadQueueDescriptor descriptor;
392 
393  std::vector<std::pair<unsigned int, unsigned int>> PadList;
394  PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
395  PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
396  PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
397  PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
398 
399  descriptor.m_Parameters.m_PadList = PadList;
400  armnn::WorkloadInfo info;
401 
402  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
403  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
404 
405  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
406 
407  inputHandle->Allocate();
408  outputHandle->Allocate();
409 
410  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
411 
412  workload->PostAllocationConfigure();
413  workload->Execute();
414 
415  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
416 
417  return result;
418 }
419 
420 template<armnn::DataType ArmnnType, typename T>
422  armnn::IWorkloadFactory& workloadFactory,
424  const armnn::ITensorHandleFactory& tensorHandleFactory,
425  float qScale,
426  int32_t qOffset,
427  const float customPaddingValue)
428 {
429  IgnoreUnused(memoryManager);
430  const armnn::TensorShape inputShape{ 3, 3 };
431  const armnn::TensorShape outputShape{ 7, 7 };
432 
433  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
434  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
435 
436  std::vector<T> inputValues =
437  {
438  // Height (3) x Width (3)
439  4, 8, 6,
440  7, 4, 4,
441  3, 2, 4
442  };
443 
444  T p = static_cast<T>(customPaddingValue);
445  std::vector<T> expectedOutputValues =
446  {
447  p, p, p, p, p, p, p,
448  p, p, p, p, p, p, p,
449  p, p, 4, 8, 6, p, p,
450  p, p, 7, 4, 4, p, p,
451  p, p, 3, 2, 4, p, p,
452  p, p, p, p, p, p, p,
453  p, p, p, p, p, p, p
454  };
455 
456  auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
457 
458  LayerTestResult<T, 2> result(outputTensorInfo);
459  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
460 
461  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
462  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
463 
464 
465  armnn::PadQueueDescriptor descriptor;
466 
467  std::vector<std::pair<unsigned int, unsigned int>> padList;
468  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
469  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
470 
471  descriptor.m_Parameters.m_PadList = padList;
472  descriptor.m_Parameters.m_PadValue = customPaddingValue;
473  armnn::WorkloadInfo info;
474 
475  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
476  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
477 
478  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
479 
480  inputHandle->Allocate();
481  outputHandle->Allocate();
482 
483  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
484 
485  workload->PostAllocationConfigure();
486  workload->Execute();
487 
488  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
489 
490  return result;
491 }
492 
493 //
494 // Explicit template specializations
495 //
496 
498 Pad2dTestCommon<armnn::DataType::QSymmS16>(
499  armnn::IWorkloadFactory& workloadFactory,
501  const armnn::ITensorHandleFactory& tensorHandleFactory,
502  float qScale,
503  int32_t qOffset,
504  const float customPaddingValue);
505 
507 Pad3dTestCommon<armnn::DataType::QSymmS16>(
508  armnn::IWorkloadFactory& workloadFactory,
510  const armnn::ITensorHandleFactory& tensorHandleFactory,
511  float qScale,
512  int32_t qOffset);
513 
514 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
515 Pad4dTestCommon<armnn::DataType::QSymmS16>(
516  armnn::IWorkloadFactory& workloadFactory,
518  const armnn::ITensorHandleFactory& tensorHandleFactory,
519  float qScale,
520  int32_t qOffset);
521 
523 PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
524  armnn::IWorkloadFactory& workloadFactory,
526  const armnn::ITensorHandleFactory& tensorHandleFactory,
527  float qScale,
528  int32_t qOffset,
529  const float customPaddingValue);
530 
532 PadQAsymmTestCommon<armnn::DataType::QAsymmU8>(
533  armnn::IWorkloadFactory& workloadFactory,
535  const armnn::ITensorHandleFactory& tensorHandleFactory,
536  float qScale,
537  int32_t qOffset,
538  const float customPaddingValue);
539 
540 //
541 // Implementation functions
542 //
543 
545  armnn::IWorkloadFactory& workloadFactory,
547  const armnn::ITensorHandleFactory& tensorHandleFactory)
548 {
549  return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
550 }
551 
553  armnn::IWorkloadFactory& workloadFactory,
555  const armnn::ITensorHandleFactory& tensorHandleFactory)
556 {
557  return Pad2dTestCommon<armnn::DataType::QAsymmU8>(
558  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
559 }
560 
562  armnn::IWorkloadFactory& workloadFactory,
564  const armnn::ITensorHandleFactory& tensorHandleFactory)
565 {
566  return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
567 }
568 
570  armnn::IWorkloadFactory& workloadFactory,
572  const armnn::ITensorHandleFactory& tensorHandleFactory)
573 {
574  return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
575 }
576 
578  armnn::IWorkloadFactory& workloadFactory,
580  const armnn::ITensorHandleFactory& tensorHandleFactory)
581 {
582  return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
583 }
584 
586  armnn::IWorkloadFactory& workloadFactory,
588  const armnn::ITensorHandleFactory& tensorHandleFactory)
589 {
590  return Pad2dTestCommon<armnn::DataType::Float32>(
591  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
592 }
593 
595  armnn::IWorkloadFactory& workloadFactory,
597  const armnn::ITensorHandleFactory& tensorHandleFactory)
598 {
599  return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
600 }
601 
603  armnn::IWorkloadFactory& workloadFactory,
605  const armnn::ITensorHandleFactory& tensorHandleFactory)
606 {
607  return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
608 }
609 
611  armnn::IWorkloadFactory& workloadFactory,
613  const armnn::ITensorHandleFactory& tensorHandleFactory)
614 {
615  return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
616 }
617 
619  armnn::IWorkloadFactory& workloadFactory,
621  const armnn::ITensorHandleFactory& tensorHandleFactory)
622 {
623  return Pad2dTestCommon<armnn::DataType::BFloat16>(
624  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
625 }
626 
628  armnn::IWorkloadFactory& workloadFactory,
630  const armnn::ITensorHandleFactory& tensorHandleFactory)
631 {
632  return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
633 }
634 
636  armnn::IWorkloadFactory& workloadFactory,
638  const armnn::ITensorHandleFactory& tensorHandleFactory)
639 {
640  return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
641 }
642 
644  armnn::IWorkloadFactory& workloadFactory,
646  const armnn::ITensorHandleFactory& tensorHandleFactory)
647 {
648  return Pad2dTestCommon<armnn::DataType::QSymmS8>(
649  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
650 }
651 
653  armnn::IWorkloadFactory& workloadFactory,
655  const armnn::ITensorHandleFactory& tensorHandleFactory)
656 {
657  return Pad2dTestCommon<armnn::DataType::QSymmS8>(
658  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
659 }
660 
662  armnn::IWorkloadFactory& workloadFactory,
664  const armnn::ITensorHandleFactory& tensorHandleFactory)
665 {
666  return Pad3dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
667 }
668 
670  armnn::IWorkloadFactory& workloadFactory,
672  const armnn::ITensorHandleFactory& tensorHandleFactory)
673 {
674  return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
675 }
676 
678  armnn::IWorkloadFactory& workloadFactory,
680  const armnn::ITensorHandleFactory& tensorHandleFactory)
681 {
682  return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
683  workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 2);
684 }
685 
687  armnn::IWorkloadFactory& workloadFactory,
689  const armnn::ITensorHandleFactory& tensorHandleFactory)
690 {
691  return PadQAsymmTestCommon<armnn::DataType::QAsymmS8>(
692  workloadFactory, memoryManager, tensorHandleFactory, 2.0f, 3, 1.0f);
693 }
LayerTestResult< T, 2 > Pad2dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, const float customPaddingValue)
Definition: PadTestImpl.cpp:20
LayerTestResult< int8_t, 4 > PadInt84dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int8_t, 2 > PadInt82dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_PadValue
Optional value to use for padding, defaults to 0.
LayerTestResult< armnn::BFloat16, 2 > PadBFloat162dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > outputExpected
LayerTestResult< armnn::BFloat16, 4 > PadBFloat164dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 2 > PadQAsymmTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, const float customPaddingValue)
LayerTestResult< float, 2 > PadFloat322dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > Pad4dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
void IgnoreUnused(Ts &&...)
LayerTestResult< armnn::BFloat16, 2 > PadBFloat162dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > PadUint84dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 3 > PadUint83dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 3 > Pad3dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
Definition: PadTestImpl.cpp:95
LayerTestResult< int8_t, 3 > PadInt83dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 2 > PadUint82dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
LayerTestResult< float, 4 > PadFloat324dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 2 > PadUint82dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > output
LayerTestResult< int8_t, 2 > PadInt8CustomPaddingAsymmTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< armnn::BFloat16, 3 > PadBFloat163dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 2 > PadFloat322dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 3 > PadFloat323dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
LayerTestResult< int8_t, 2 > PadInt8AsymmTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int8_t, 2 > PadInt82dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)