ArmNN
 20.11
PadTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "PadTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
15 //
16 // Implementation templates
17 //
18 
19 template<armnn::DataType ArmnnType, typename T>
21  armnn::IWorkloadFactory& workloadFactory,
23  const armnn::ITensorHandleFactory& tensorHandleFactory,
24  float qScale,
25  int32_t qOffset,
26  const float customPaddingValue)
27 {
28  IgnoreUnused(memoryManager);
29  const armnn::TensorShape inputShape{ 3, 3 };
30  const armnn::TensorShape outputShape{ 7, 7 };
31 
32  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
33  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
34 
35  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
36  {
37  // Height (3) x Width (3)
38  4, 8, 6,
39  7, 4, 4,
40  3, 2, 4
41  },
42  qScale, qOffset);
43 
44  auto p = customPaddingValue;
45  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
46  {
47  p, p, p, p, p, p, p,
48  p, p, p, p, p, p, p,
49  p, p, 4, 8, 6, p, p,
50  p, p, 7, 4, 4, p, p,
51  p, p, 3, 2, 4, p, p,
52  p, p, p, p, p, p, p,
53  p, p, p, p, p, p, p
54  },
55  qScale, qOffset);
56 
57  auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
58 
59  LayerTestResult<T, 2> result(outputTensorInfo);
60  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
61 
62  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
63  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
64 
65 
66  armnn::PadQueueDescriptor descriptor;
67 
68  std::vector<std::pair<unsigned int, unsigned int>> padList;
69  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
70  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
71 
72  descriptor.m_Parameters.m_PadList = padList;
73  descriptor.m_Parameters.m_PadValue = customPaddingValue;
75 
76  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
77  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
78 
79  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
80 
81  inputHandle->Allocate();
82  outputHandle->Allocate();
83 
84  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
85 
86  workload->PostAllocationConfigure();
87  workload->Execute();
88 
89  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
90 
91  return result;
92 }
93 
94 template<armnn::DataType ArmnnType, typename T>
96  armnn::IWorkloadFactory& workloadFactory,
98  const armnn::ITensorHandleFactory& tensorHandleFactory,
99  float qScale,
100  int32_t qOffset)
101 {
102  IgnoreUnused(memoryManager);
103  const armnn::TensorShape inputShape{ 2, 2, 2 };
104  const armnn::TensorShape outputShape{ 3, 5, 6 };
105 
106  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
107  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
108 
109  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
110  {
111  // Channel 0, Height (2) x Width (2)
112  0, 4,
113  2, 5,
114 
115  // Channel 1, Height (2) x Width (2)
116  6, 1,
117  5, 2
118  },
119  qScale, qOffset);
120 
121  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
122  {
123  0, 0, 0, 0, 0, 0,
124  0, 0, 0, 0, 0, 0,
125  0, 0, 0, 4, 0, 0,
126  0, 0, 2, 5, 0, 0,
127  0, 0, 0, 0, 0, 0,
128 
129  0, 0, 0, 0, 0, 0,
130  0, 0, 0, 0, 0, 0,
131  0, 0, 6, 1, 0, 0,
132  0, 0, 5, 2, 0, 0,
133  0, 0, 0, 0, 0, 0,
134 
135  0, 0, 0, 0, 0, 0,
136  0, 0, 0, 0, 0, 0,
137  0, 0, 0, 0, 0, 0,
138  0, 0, 0, 0, 0, 0,
139  0, 0, 0, 0, 0, 0
140  },
141  qScale, qOffset);
142 
143  auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
144 
145  LayerTestResult<T, 3> result(outputTensorInfo);
146  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
147 
148  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
149  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
150 
151 
152  armnn::PadQueueDescriptor descriptor;
153 
154  std::vector<std::pair<unsigned int, unsigned int>> PadList;
155  PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
156  PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
157  PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
158 
159  descriptor.m_Parameters.m_PadList = PadList;
160  armnn::WorkloadInfo info;
161 
162  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
163  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
164 
165  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
166 
167  inputHandle->Allocate();
168  outputHandle->Allocate();
169 
170  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
171 
172  workload->PostAllocationConfigure();
173  workload->Execute();
174 
175  CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
176 
177  return result;
178 }
179 
180 template<armnn::DataType ArmnnType, typename T>
182  armnn::IWorkloadFactory& workloadFactory,
184  const armnn::ITensorHandleFactory& tensorHandleFactory,
185  float qScale,
186  int32_t qOffset)
187 {
188  IgnoreUnused(memoryManager);
189  const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
190  const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
191 
192  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
193  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
194 
195  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
196  {
197  // Batch 0, Channel 0, Height (3) x Width (2)
198  0, 1,
199  2, 3,
200  4, 5,
201 
202  // Batch 0, Channel 1, Height (3) x Width (2)
203  6, 7,
204  8, 9,
205  10, 11,
206 
207  // Batch 1, Channel 0, Height (3) x Width (2)
208  12, 13,
209  14, 15,
210  16, 17,
211 
212  // Batch 1, Channel 1, Height (3) x Width (2)
213  18, 19,
214  20, 21,
215  22, 23
216  },
217  qScale, qOffset);
218 
219  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
220  {
221  0, 0, 0, 0,
222  0, 0, 0, 0,
223  0, 0, 0, 0,
224  0, 0, 0, 0,
225  0, 0, 0, 0,
226  0, 0, 0, 0,
227  0, 0, 0, 0,
228 
229  0, 0, 0, 0,
230  0, 0, 0, 0,
231  0, 0, 0, 0,
232  0, 0, 0, 0,
233  0, 0, 0, 0,
234  0, 0, 0, 0,
235  0, 0, 0, 0,
236 
237  0, 0, 0, 0,
238  0, 0, 0, 0,
239  0, 0, 0, 0,
240  0, 0, 0, 0,
241  0, 0, 0, 0,
242  0, 0, 0, 0,
243  0, 0, 0, 0,
244 
245  0, 0, 0, 0,
246  0, 0, 0, 0,
247  0, 0, 0, 0,
248  0, 0, 0, 0,
249  0, 0, 0, 0,
250  0, 0, 0, 0,
251  0, 0, 0, 0,
252 
253  0, 0, 0, 0,
254  0, 0, 0, 0,
255  0, 0, 0, 0,
256  0, 0, 0, 0,
257  0, 0, 0, 0,
258  0, 0, 0, 0,
259  0, 0, 0, 0,
260 
261  0, 0, 0, 0,
262  0, 0, 0, 0,
263  0, 0, 0, 0,
264  0, 0, 0, 0,
265  0, 0, 0, 0,
266  0, 0, 0, 0,
267  0, 0, 0, 0,
268 
269  0, 0, 0, 0,
270  0, 0, 0, 0,
271  0, 0, 0, 0,
272  0, 0, 0, 0,
273  0, 0, 0, 0,
274  0, 0, 0, 0,
275  0, 0, 0, 0,
276 
277  0, 0, 0, 0,
278  0, 0, 0, 0,
279  0, 0, 0, 0,
280  0, 0, 1, 0,
281  0, 2, 3, 0,
282  0, 4, 5, 0,
283  0, 0, 0, 0,
284 
285  0, 0, 0, 0,
286  0, 0, 0, 0,
287  0, 0, 0, 0,
288  0, 6, 7, 0,
289  0, 8, 9, 0,
290  0, 10, 11, 0,
291  0, 0, 0, 0,
292 
293  0, 0, 0, 0,
294  0, 0, 0, 0,
295  0, 0, 0, 0,
296  0, 0, 0, 0,
297  0, 0, 0, 0,
298  0, 0, 0, 0,
299  0, 0, 0, 0,
300 
301  0, 0, 0, 0,
302  0, 0, 0, 0,
303  0, 0, 0, 0,
304  0, 0, 0, 0,
305  0, 0, 0, 0,
306  0, 0, 0, 0,
307  0, 0, 0, 0,
308 
309  0, 0, 0, 0,
310  0, 0, 0, 0,
311  0, 0, 0, 0,
312  0, 0, 0, 0,
313  0, 0, 0, 0,
314  0, 0, 0, 0,
315  0, 0, 0, 0,
316 
317  0, 0, 0, 0,
318  0, 0, 0, 0,
319  0, 0, 0, 0,
320  0, 12, 13, 0,
321  0, 14, 15, 0,
322  0, 16, 17, 0,
323  0, 0, 0, 0,
324 
325  0, 0, 0, 0,
326  0, 0, 0, 0,
327  0, 0, 0, 0,
328  0, 18, 19, 0,
329  0, 20, 21, 0,
330  0, 22, 23, 0,
331  0, 0, 0, 0,
332 
333  0, 0, 0, 0,
334  0, 0, 0, 0,
335  0, 0, 0, 0,
336  0, 0, 0, 0,
337  0, 0, 0, 0,
338  0, 0, 0, 0,
339  0, 0, 0, 0,
340 
341  0, 0, 0, 0,
342  0, 0, 0, 0,
343  0, 0, 0, 0,
344  0, 0, 0, 0,
345  0, 0, 0, 0,
346  0, 0, 0, 0,
347  0, 0, 0, 0,
348 
349  0, 0, 0, 0,
350  0, 0, 0, 0,
351  0, 0, 0, 0,
352  0, 0, 0, 0,
353  0, 0, 0, 0,
354  0, 0, 0, 0,
355  0, 0, 0, 0,
356 
357  0, 0, 0, 0,
358  0, 0, 0, 0,
359  0, 0, 0, 0,
360  0, 0, 0, 0,
361  0, 0, 0, 0,
362  0, 0, 0, 0,
363  0, 0, 0, 0,
364 
365  0, 0, 0, 0,
366  0, 0, 0, 0,
367  0, 0, 0, 0,
368  0, 0, 0, 0,
369  0, 0, 0, 0,
370  0, 0, 0, 0,
371  0, 0, 0, 0,
372 
373  0, 0, 0, 0,
374  0, 0, 0, 0,
375  0, 0, 0, 0,
376  0, 0, 0, 0,
377  0, 0, 0, 0,
378  0, 0, 0, 0,
379  0, 0, 0, 0
380  },
381  qScale, qOffset);
382 
383  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
384 
385  LayerTestResult<T, 4> result(outputTensorInfo);
386  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
387 
388  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
389  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
390 
391  armnn::PadQueueDescriptor descriptor;
392 
393  std::vector<std::pair<unsigned int, unsigned int>> PadList;
394  PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
395  PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
396  PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
397  PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
398 
399  descriptor.m_Parameters.m_PadList = PadList;
400  armnn::WorkloadInfo info;
401 
402  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
403  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
404 
405  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
406 
407  inputHandle->Allocate();
408  outputHandle->Allocate();
409 
410  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
411 
412  workload->PostAllocationConfigure();
413  workload->Execute();
414 
415  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
416 
417  return result;
418 }
419 
420 //
421 // Explicit template specializations
422 //
423 
425 Pad2dTestCommon<armnn::DataType::QSymmS16>(
426  armnn::IWorkloadFactory& workloadFactory,
428  const armnn::ITensorHandleFactory& tensorHandleFactory,
429  float qScale,
430  int32_t qOffset,
431  const float customPaddingValue);
432 
434 Pad3dTestCommon<armnn::DataType::QSymmS16>(
435  armnn::IWorkloadFactory& workloadFactory,
437  const armnn::ITensorHandleFactory& tensorHandleFactory,
438  float qScale,
439  int32_t qOffset);
440 
441 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
442 Pad4dTestCommon<armnn::DataType::QSymmS16>(
443  armnn::IWorkloadFactory& workloadFactory,
445  const armnn::ITensorHandleFactory& tensorHandleFactory,
446  float qScale,
447  int32_t qOffset);
448 
449 //
450 // Implementation functions
451 //
452 
454  armnn::IWorkloadFactory& workloadFactory,
456  const armnn::ITensorHandleFactory& tensorHandleFactory)
457 {
458  return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
459 }
460 
462  armnn::IWorkloadFactory& workloadFactory,
464  const armnn::ITensorHandleFactory& tensorHandleFactory)
465 {
466  return Pad2dTestCommon<armnn::DataType::QAsymmU8>(
467  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
468 }
469 
471  armnn::IWorkloadFactory& workloadFactory,
473  const armnn::ITensorHandleFactory& tensorHandleFactory)
474 {
475  return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
476 }
477 
479  armnn::IWorkloadFactory& workloadFactory,
481  const armnn::ITensorHandleFactory& tensorHandleFactory)
482 {
483  return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
484 }
485 
487  armnn::IWorkloadFactory& workloadFactory,
489  const armnn::ITensorHandleFactory& tensorHandleFactory)
490 {
491  return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
492 }
493 
495  armnn::IWorkloadFactory& workloadFactory,
497  const armnn::ITensorHandleFactory& tensorHandleFactory)
498 {
499  return Pad2dTestCommon<armnn::DataType::Float32>(
500  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
501 }
502 
504  armnn::IWorkloadFactory& workloadFactory,
506  const armnn::ITensorHandleFactory& tensorHandleFactory)
507 {
508  return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
509 }
510 
512  armnn::IWorkloadFactory& workloadFactory,
514  const armnn::ITensorHandleFactory& tensorHandleFactory)
515 {
516  return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
517 }
518 
520  armnn::IWorkloadFactory& workloadFactory,
522  const armnn::ITensorHandleFactory& tensorHandleFactory)
523 {
524  return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
525 }
526 
528  armnn::IWorkloadFactory& workloadFactory,
530  const armnn::ITensorHandleFactory& tensorHandleFactory)
531 {
532  return Pad2dTestCommon<armnn::DataType::BFloat16>(
533  workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0, 1.0f);
534 }
535 
537  armnn::IWorkloadFactory& workloadFactory,
539  const armnn::ITensorHandleFactory& tensorHandleFactory)
540 {
541  return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
542 }
543 
545  armnn::IWorkloadFactory& workloadFactory,
547  const armnn::ITensorHandleFactory& tensorHandleFactory)
548 {
549  return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
550 }
551 
553  armnn::IWorkloadFactory& workloadFactory,
555  const armnn::ITensorHandleFactory& tensorHandleFactory)
556 {
557  return Pad2dTestCommon<armnn::DataType::QSymmS8>(
558  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
559 }
560 
562  armnn::IWorkloadFactory& workloadFactory,
564  const armnn::ITensorHandleFactory& tensorHandleFactory)
565 {
566  return Pad2dTestCommon<armnn::DataType::QSymmS8>(
567  workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0, 1.0f);
568 }
569 
571  armnn::IWorkloadFactory& workloadFactory,
573  const armnn::ITensorHandleFactory& tensorHandleFactory)
574 {
575  return Pad3dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
576 }
577 
579  armnn::IWorkloadFactory& workloadFactory,
581  const armnn::ITensorHandleFactory& tensorHandleFactory)
582 {
583  return Pad4dTestCommon<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
584 }
LayerTestResult< T, 2 > Pad2dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset, const float customPaddingValue)
Definition: PadTestImpl.cpp:20
LayerTestResult< int8_t, 4 > PadInt84dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int8_t, 2 > PadInt82dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
float m_PadValue
Optional value to use for padding, defaults to 0.
LayerTestResult< armnn::BFloat16, 2 > PadBFloat162dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > outputExpected
LayerTestResult< armnn::BFloat16, 4 > PadBFloat164dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 2 > PadFloat322dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 4 > Pad4dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding for input dimension.
void IgnoreUnused(Ts &&...)
LayerTestResult< armnn::BFloat16, 2 > PadBFloat162dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > PadUint84dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 3 > PadUint83dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< T, 3 > Pad3dTestCommon(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, float qScale, int32_t qOffset)
Definition: PadTestImpl.cpp:95
LayerTestResult< int8_t, 3 > PadInt83dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 2 > PadUint82dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreatePad(const PadQueueDescriptor &descriptor, const WorkloadInfo &Info) const
LayerTestResult< float, 4 > PadFloat324dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 2 > PadUint82dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
boost::multi_array< T, n > output
LayerTestResult< armnn::BFloat16, 3 > PadBFloat163dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 2 > PadFloat322dCustomPaddingTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 3 > PadFloat323dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about inputs and outputs to a layer.
LayerTestResult< int8_t, 2 > PadInt82dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)