ArmNN 21.02
AdditionTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "AdditionTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <reference/test/RefWorkloadFactoryHelper.hpp>

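// Template specialization that lets the shared ElementwiseTestHelper create an
// Addition workload: the helper is parameterized on the queue descriptor type
// and calls this factory function for armnn::AdditionQueueDescriptor.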
template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateAddition(descriptor, info);
}

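// Element-wise addition of two 2x2x2x3 float tensors of identical shape; the
// expected output is simply input1 + input2 per element.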
LayerTestResult<float, 4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int batchSize = 2u;
    unsigned int channels = 2u;
    unsigned int height = 2u;
    unsigned int width = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    };

    std::vector<float> input2 =
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    };

    std::vector<float> output =
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output,
        tensorHandleFactory);
}

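// The same element-wise addition, but on 5-dimensional (2x2x2x2x3) tensors to
// exercise rank-5 support in the elementwise test helper.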
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    unsigned int depth = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels = 2u;
    unsigned int height = 2u;
    unsigned int width = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,


        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,


        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,


        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output,
        tensorHandleFactory);
}

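// Broadcast test: adds a {1,3,2,1} tensor to a {1,1,2,3} tensor. Both inputs
// are stretched along their size-1 dimensions to the {1,3,2,3} output shape,
// so e.g. output[0][0][1][2] = input1[0][0][1][0] + input2[0][0][1][2]
// = 1.0f + 5.5f = 6.5f.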
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

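// Broadcast of a single element: a {1,1,1,1} tensor holding 0.5f is added to
// every element of a {1,3,2,3} tensor.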
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

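// Typed entry points: each wrapper instantiates one of the broadcast test
// templates above for a single data type, supplying the quantization scale and
// offset used by the quantized variants.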
LayerTestResult<float, 4> AdditionBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
}

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
}

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
        workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
}

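// Quantized addition for QAsymmU8. All tensors use scale 7 and offset 3, so a
// stored value q represents the real value (q - 3) * 7. The comments alongside
// the data give these real values: e.g. 63 and 21 represent 420 and 126, their
// sum 546 requantizes to 546/7 + 3 = 81, and real sums above (255 - 3) * 7 =
// 1764 clamp to the uint8 maximum of 255.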
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
        63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371,  763
        203, 28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694,  616
    });

    std::vector<uint8_t> input1(
    {
        21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126 // 861, 1106,  420,  126,  714,  861
    });

    std::vector<uint8_t> output(
    {
        81,  39, 249, 255, 228, 255, //  546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214 // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        3,
        shape1,
        input1,
        7.0f,
        3,
        shape0,
        output,
        tensorHandleFactory,
        7.0f,
        3);
}

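// The same input data interpreted as QSymmS16 with scale 7 and offset 0, so a
// stored value q represents q * 7. Unlike the uint8 test, none of the sums come
// anywhere near the int16 range, so no clamping occurs.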
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
        63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392,  784
        203, 28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715,  637
    };

    std::vector<int16_t> input1 =
    {
        21,   7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
        126, 161,  63,  21, 105, 126 // 882, 1127,  441,  147,  735,  882
    };

    std::vector<int16_t> output =
    {
        84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217 // 2303, 1323, 2205, 1323, 2450, 1519
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        0,
        shape1,
        input1,
        7.0f,
        0,
        shape0,
        output,
        tensorHandleFactory,
        7.0f,
        0);
}

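// Signed32 variant with scale 1 and offset 0: the stored values are the real
// values, and the expected output is the plain integer sum of the inputs.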
LayerTestResult<int32_t, 4> AdditionInt32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int32_t> input0 =
    {
        63,  35,  77,  70,  56, 112,
        203, 28, 252, 168, 245,  91
    };

    std::vector<int32_t> input1 =
    {
        21,   7, 175, 231, 175, 210,
        126, 161,  63,  21, 105, 126
    };

    std::vector<int32_t> output =
    {
        84,  42, 252, 301, 231, 322,
        329, 189, 315, 189, 350, 217
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        1.0f,
        0,
        shape1,
        input1,
        1.0f,
        0,
        shape0,
        output,
        tensorHandleFactory,
        1.0f,
        0);
}

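// Chains two workloads: a 1x1 MaxPool with 2x2 stride picks the corners of a
// 3x3 input, and the pooled result is then added to a second 2x2 tensor. This
// checks that the output handle of one workload can feed another's input.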
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);

    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
        {
            1, 2, 3,
            4, 5, 6,
            7, 8, 9
        });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2.
    // Result:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with a second tensor of the same size. This tensor is
    // what a Conv2d with a 2x2 kernel of ones and 1x1 stride would produce from
    // the initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
        {
            12, 16,
            24, 28,
        });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
        {
            13, 19,
            31, 37
        }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle =
        tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the second tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}

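// Runs the same random addition on the backend under test and on a reference
// workload factory, returning both outputs so the caller can compare them.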
LayerTestResult<float, 4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 4;
    unsigned int channels = 1;
    unsigned int height = 2;
    unsigned int width = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = { batchSize, channels, height, width };

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}