//
// ArmNN 20.05 - PadTestImpl.cpp
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "PadTestImpl.hpp"

#include <QuantizeHelper.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>
14 
//
// Implementation templates
//
19 template<armnn::DataType ArmnnType, typename T>
21  armnn::IWorkloadFactory& workloadFactory,
23  float qScale,
24  int32_t qOffset,
25  const float customPaddingValue)
26 {
27  IgnoreUnused(memoryManager);
28  const armnn::TensorShape inputShape{ 3, 3 };
29  const armnn::TensorShape outputShape{ 7, 7 };
30 
31  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
32  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
33 
34  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
35  {
36  // Height (3) x Width (3)
37  4, 8, 6,
38  7, 4, 4,
39  3, 2, 4
40  },
41  qScale, qOffset);
42 
43  auto p = customPaddingValue;
44  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
45  {
46  p, p, p, p, p, p, p,
47  p, p, p, p, p, p, p,
48  p, p, 4, 8, 6, p, p,
49  p, p, 7, 4, 4, p, p,
50  p, p, 3, 2, 4, p, p,
51  p, p, p, p, p, p, p,
52  p, p, p, p, p, p, p
53  },
54  qScale, qOffset);
55 
56  auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
57 
58  LayerTestResult<T, 2> result(outputTensorInfo);
59  result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
60 
61  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
62  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
63 
64  armnn::PadQueueDescriptor descriptor;
65 
66  std::vector<std::pair<unsigned int, unsigned int>> padList;
67  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
68  padList.push_back(std::pair<unsigned int, unsigned int>(2,2));
69 
70  descriptor.m_Parameters.m_PadList = padList;
71  descriptor.m_Parameters.m_PadValue = customPaddingValue;
73 
74  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
75  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
76 
77  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
78 
79  inputHandle->Allocate();
80  outputHandle->Allocate();
81 
82  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
83 
84  workload->PostAllocationConfigure();
85  workload->Execute();
86 
87  CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
88 
89  return result;
90 }
91 
92 template<armnn::DataType ArmnnType, typename T>
94  armnn::IWorkloadFactory& workloadFactory,
96  float qScale,
97  int32_t qOffset)
98 {
99  IgnoreUnused(memoryManager);
100  const armnn::TensorShape inputShape{ 2, 2, 2 };
101  const armnn::TensorShape outputShape{ 3, 5, 6 };
102 
103  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
104  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
105 
106  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
107  {
108  // Channel 0, Height (2) x Width (2)
109  0, 4,
110  2, 5,
111 
112  // Channel 1, Height (2) x Width (2)
113  6, 1,
114  5, 2
115  },
116  qScale, qOffset);
117 
118  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
119  {
120  0, 0, 0, 0, 0, 0,
121  0, 0, 0, 0, 0, 0,
122  0, 0, 0, 4, 0, 0,
123  0, 0, 2, 5, 0, 0,
124  0, 0, 0, 0, 0, 0,
125 
126  0, 0, 0, 0, 0, 0,
127  0, 0, 0, 0, 0, 0,
128  0, 0, 6, 1, 0, 0,
129  0, 0, 5, 2, 0, 0,
130  0, 0, 0, 0, 0, 0,
131 
132  0, 0, 0, 0, 0, 0,
133  0, 0, 0, 0, 0, 0,
134  0, 0, 0, 0, 0, 0,
135  0, 0, 0, 0, 0, 0,
136  0, 0, 0, 0, 0, 0
137  },
138  qScale, qOffset);
139 
140  auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
141 
142  LayerTestResult<T, 3> result(outputTensorInfo);
143  result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
144 
145  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
146  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
147 
148  armnn::PadQueueDescriptor descriptor;
149 
150  std::vector<std::pair<unsigned int, unsigned int>> PadList;
151  PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
152  PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
153  PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
154 
155  descriptor.m_Parameters.m_PadList = PadList;
156  armnn::WorkloadInfo info;
157 
158  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
159  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
160 
161  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
162 
163  inputHandle->Allocate();
164  outputHandle->Allocate();
165 
166  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
167 
168  workload->PostAllocationConfigure();
169  workload->Execute();
170 
171  CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
172 
173  return result;
174 }
175 
176 template<armnn::DataType ArmnnType, typename T>
178  armnn::IWorkloadFactory& workloadFactory,
180  float qScale,
181  int32_t qOffset)
182 {
183  IgnoreUnused(memoryManager);
184  const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
185  const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
186 
187  const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
188  const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
189 
190  std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
191  {
192  // Batch 0, Channel 0, Height (3) x Width (2)
193  0, 1,
194  2, 3,
195  4, 5,
196 
197  // Batch 0, Channel 1, Height (3) x Width (2)
198  6, 7,
199  8, 9,
200  10, 11,
201 
202  // Batch 1, Channel 0, Height (3) x Width (2)
203  12, 13,
204  14, 15,
205  16, 17,
206 
207  // Batch 1, Channel 1, Height (3) x Width (2)
208  18, 19,
209  20, 21,
210  22, 23
211  },
212  qScale, qOffset);
213 
214  std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
215  {
216  0, 0, 0, 0,
217  0, 0, 0, 0,
218  0, 0, 0, 0,
219  0, 0, 0, 0,
220  0, 0, 0, 0,
221  0, 0, 0, 0,
222  0, 0, 0, 0,
223 
224  0, 0, 0, 0,
225  0, 0, 0, 0,
226  0, 0, 0, 0,
227  0, 0, 0, 0,
228  0, 0, 0, 0,
229  0, 0, 0, 0,
230  0, 0, 0, 0,
231 
232  0, 0, 0, 0,
233  0, 0, 0, 0,
234  0, 0, 0, 0,
235  0, 0, 0, 0,
236  0, 0, 0, 0,
237  0, 0, 0, 0,
238  0, 0, 0, 0,
239 
240  0, 0, 0, 0,
241  0, 0, 0, 0,
242  0, 0, 0, 0,
243  0, 0, 0, 0,
244  0, 0, 0, 0,
245  0, 0, 0, 0,
246  0, 0, 0, 0,
247 
248  0, 0, 0, 0,
249  0, 0, 0, 0,
250  0, 0, 0, 0,
251  0, 0, 0, 0,
252  0, 0, 0, 0,
253  0, 0, 0, 0,
254  0, 0, 0, 0,
255 
256  0, 0, 0, 0,
257  0, 0, 0, 0,
258  0, 0, 0, 0,
259  0, 0, 0, 0,
260  0, 0, 0, 0,
261  0, 0, 0, 0,
262  0, 0, 0, 0,
263 
264  0, 0, 0, 0,
265  0, 0, 0, 0,
266  0, 0, 0, 0,
267  0, 0, 0, 0,
268  0, 0, 0, 0,
269  0, 0, 0, 0,
270  0, 0, 0, 0,
271 
272  0, 0, 0, 0,
273  0, 0, 0, 0,
274  0, 0, 0, 0,
275  0, 0, 1, 0,
276  0, 2, 3, 0,
277  0, 4, 5, 0,
278  0, 0, 0, 0,
279 
280  0, 0, 0, 0,
281  0, 0, 0, 0,
282  0, 0, 0, 0,
283  0, 6, 7, 0,
284  0, 8, 9, 0,
285  0, 10, 11, 0,
286  0, 0, 0, 0,
287 
288  0, 0, 0, 0,
289  0, 0, 0, 0,
290  0, 0, 0, 0,
291  0, 0, 0, 0,
292  0, 0, 0, 0,
293  0, 0, 0, 0,
294  0, 0, 0, 0,
295 
296  0, 0, 0, 0,
297  0, 0, 0, 0,
298  0, 0, 0, 0,
299  0, 0, 0, 0,
300  0, 0, 0, 0,
301  0, 0, 0, 0,
302  0, 0, 0, 0,
303 
304  0, 0, 0, 0,
305  0, 0, 0, 0,
306  0, 0, 0, 0,
307  0, 0, 0, 0,
308  0, 0, 0, 0,
309  0, 0, 0, 0,
310  0, 0, 0, 0,
311 
312  0, 0, 0, 0,
313  0, 0, 0, 0,
314  0, 0, 0, 0,
315  0, 12, 13, 0,
316  0, 14, 15, 0,
317  0, 16, 17, 0,
318  0, 0, 0, 0,
319 
320  0, 0, 0, 0,
321  0, 0, 0, 0,
322  0, 0, 0, 0,
323  0, 18, 19, 0,
324  0, 20, 21, 0,
325  0, 22, 23, 0,
326  0, 0, 0, 0,
327 
328  0, 0, 0, 0,
329  0, 0, 0, 0,
330  0, 0, 0, 0,
331  0, 0, 0, 0,
332  0, 0, 0, 0,
333  0, 0, 0, 0,
334  0, 0, 0, 0,
335 
336  0, 0, 0, 0,
337  0, 0, 0, 0,
338  0, 0, 0, 0,
339  0, 0, 0, 0,
340  0, 0, 0, 0,
341  0, 0, 0, 0,
342  0, 0, 0, 0,
343 
344  0, 0, 0, 0,
345  0, 0, 0, 0,
346  0, 0, 0, 0,
347  0, 0, 0, 0,
348  0, 0, 0, 0,
349  0, 0, 0, 0,
350  0, 0, 0, 0,
351 
352  0, 0, 0, 0,
353  0, 0, 0, 0,
354  0, 0, 0, 0,
355  0, 0, 0, 0,
356  0, 0, 0, 0,
357  0, 0, 0, 0,
358  0, 0, 0, 0,
359 
360  0, 0, 0, 0,
361  0, 0, 0, 0,
362  0, 0, 0, 0,
363  0, 0, 0, 0,
364  0, 0, 0, 0,
365  0, 0, 0, 0,
366  0, 0, 0, 0,
367 
368  0, 0, 0, 0,
369  0, 0, 0, 0,
370  0, 0, 0, 0,
371  0, 0, 0, 0,
372  0, 0, 0, 0,
373  0, 0, 0, 0,
374  0, 0, 0, 0
375  },
376  qScale, qOffset);
377 
378  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
379 
380  LayerTestResult<T, 4> result(outputTensorInfo);
381  result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
382 
383  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
384  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
385 
386  armnn::PadQueueDescriptor descriptor;
387 
388  std::vector<std::pair<unsigned int, unsigned int>> PadList;
389  PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
390  PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
391  PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
392  PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
393 
394  descriptor.m_Parameters.m_PadList = PadList;
395  armnn::WorkloadInfo info;
396 
397  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
398  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
399 
400  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
401 
402  inputHandle->Allocate();
403  outputHandle->Allocate();
404 
405  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
406 
407  workload->PostAllocationConfigure();
408  workload->Execute();
409 
410  CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
411 
412  return result;
413 }
414 
//
// Explicit template specializations
//
420 Pad2dTestCommon<armnn::DataType::QSymmS16>(
421  armnn::IWorkloadFactory& workloadFactory,
423  float qScale,
424  int32_t qOffset,
425  const float customPaddingValue);
426 
428 Pad3dTestCommon<armnn::DataType::QSymmS16>(
429  armnn::IWorkloadFactory& workloadFactory,
431  float qScale,
432  int32_t qOffset);
433 
434 template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
435 Pad4dTestCommon<armnn::DataType::QSymmS16>(
436  armnn::IWorkloadFactory& workloadFactory,
438  float qScale,
439  int32_t qOffset);
440 
//
// Implementation functions
//
446  armnn::IWorkloadFactory& workloadFactory,
448 {
449  return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
450 }
451 
453  armnn::IWorkloadFactory& workloadFactory,
455 {
456  return Pad2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0, 1.0f);
457 }
458 
460  armnn::IWorkloadFactory& workloadFactory,
462 {
463  return Pad3dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
464 }
465 
467  armnn::IWorkloadFactory& workloadFactory,
469 {
470  return Pad4dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
471 }
472 
474  armnn::IWorkloadFactory& workloadFactory,
476 {
477  return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
478 }
479 
481  armnn::IWorkloadFactory& workloadFactory,
483 {
484  return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
485 }
486 
488  armnn::IWorkloadFactory& workloadFactory,
490 {
491  return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
492 }
493 
495  armnn::IWorkloadFactory& workloadFactory,
497 {
498  return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
499 }
500 
502  armnn::IWorkloadFactory& workloadFactory,
504 {
505  return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
506 }
507 
509  armnn::IWorkloadFactory& workloadFactory,
511 {
512  return Pad2dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0, 1.0f);
513 }
514 
516  armnn::IWorkloadFactory& workloadFactory,
518 {
519  return Pad3dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
520 }
521 
523  armnn::IWorkloadFactory& workloadFactory,
525 {
526  return Pad4dTestCommon<armnn::DataType::BFloat16>(workloadFactory, memoryManager, 0.0f, 0);
527 }
/*
Doxygen tooltip text captured with the listing (kept for reference, not code):

float m_PadValue — Optional value to use for padding, defaults to 0.
std::vector<std::pair<unsigned int, unsigned int>> m_PadList — Specifies the padding for input dimension.
boost::multi_array<T, n> outputExpected / output — LayerTestResult members.
void IgnoreUnused(Ts&&...)
LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, float qScale, int32_t qOffset, const float customPaddingValue) — Definition: PadTestImpl.cpp:20
LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, float qScale, int32_t qOffset) — Definition: PadTestImpl.cpp:93
LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory&, const armnn::IBackendInternal::IMemoryManagerSharedPtr&, float qScale, int32_t qOffset)
LayerTestResult<uint8_t, 2> PadUint82dTest(...) / PadUint82dCustomPaddingTest(...)
LayerTestResult<uint8_t, 3> PadUint83dTest(...)
LayerTestResult<uint8_t, 4> PadUint84dTest(...)
LayerTestResult<float, 2> PadFloat322dTest(...) / PadFloat322dCustomPaddingTest(...)
LayerTestResult<float, 3> PadFloat323dTest(...)
LayerTestResult<float, 4> PadFloat324dTest(...)
LayerTestResult<armnn::BFloat16, 2> PadBFloat162dTest(...) / PadBFloat162dCustomPaddingTest(...)
LayerTestResult<armnn::BFloat16, 3> PadBFloat163dTest(...)
LayerTestResult<armnn::BFloat16, 4> PadBFloat164dTest(...)
std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor, const WorkloadInfo& Info) const
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo, const bool IsMemoryManaged = true) const = 0
void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
WorkloadInfo — Contains information about inputs and outputs to a layer.
*/