ArmNN 20.02 - AdditionTestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "AdditionTestImpl.hpp"

#include "ElementwiseTestImpl.hpp"

#include <QuantizeHelper.hpp>

template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
    const armnn::IWorkloadFactory& workloadFactory,
    const armnn::WorkloadInfo& info,
    const armnn::AdditionQueueDescriptor& descriptor)
{
    return workloadFactory.CreateAddition(descriptor, info);
}

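// Element-wise addition of two [2,2,2,3] Float32 tensors of the same shape.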
LayerTestResult<float, 4> AdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        0.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        1.0f, 2.0f, 1.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 1.0f, 2.0f,

        0.0f, 0.0f, 1.0f,
        0.2f, 1.0f, 2.0f,
    };

    std::vector<float> input2 =
    {
        1.0f, 2.0f, 1.0f,
        0.0f, 1.0f, 2.0f,

        1.0f, 2.0f, -2.0f,
        0.2f, 1.0f, 2.0f,

        0.0f, 2.0f, 1.0f,
        4.2f, 0.0f, -3.0f,

        0.0f, 0.0f, 1.0f,
        0.7f, 1.0f, 5.0f,
    };

    std::vector<float> output =
    {
        1.0f, 4.0f, 2.0f,
        0.2f, 2.0f, 4.0f,

        2.0f, 4.0f, -1.0f,
        0.4f, 2.0f, 4.0f,

        0.0f, 4.0f, 2.0f,
        8.4f, 1.0f, -1.0f,

        0.0f, 0.0f, 2.0f,
        0.9f, 2.0f, 7.0f,
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output);
}

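// Element-wise addition extended to 5D [2,2,2,2,3] Float32 tensors.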
LayerTestResult<float, 5> Addition5dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    unsigned int depth     = 2u;
    unsigned int batchSize = 2u;
    unsigned int channels  = 2u;
    unsigned int height    = 2u;
    unsigned int width     = 3u;

    unsigned int shape[] = { depth, batchSize, channels, height, width };

    std::vector<float> input1 =
    {
        2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
        2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,

        2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
        0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,


        1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
        1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,

        0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
        0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
    };

    std::vector<float> input2 =
    {
        4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
        1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,

        4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
        0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,


        0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
        2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,

        3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
        2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
    };

    std::vector<float> output =
    {
        7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
        4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,

        7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
        0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,


        1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
        3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,

        4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
        2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
    };

    return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        shape,
        input1,
        shape,
        input2,
        shape,
        output);
}

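// Broadcast addition: input1 has shape [1,3,2,1] and input2 [1,1,2,3]; both are
// broadcast against each other to produce the [1,3,2,3] output.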
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
            0.0f,
            1.0f,

            2.0f,
            3.0f,

            4.0f,
            5.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            3.5f, 4.5f, 5.5f,
        },
        qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
            0.5f, 1.5f, 2.5f,
            4.5f, 5.5f, 6.5f,

            2.5f, 3.5f, 4.5f,
            6.5f, 7.5f, 8.5f,

            4.5f, 5.5f, 6.5f,
            8.5f, 9.5f, 10.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

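// Broadcast addition of a single-element [1,1,1,1] tensor: the scalar 0.5 is
// added to every element of the [1,3,2,3] input.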
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);

    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo1.SetQuantizationScale(qScale);
        inputTensorInfo1.SetQuantizationOffset(qOffset);
        inputTensorInfo2.SetQuantizationScale(qScale);
        inputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
        {
             0.0f,  1.0f,  2.0f,
             3.0f,  4.0f,  5.0f,
             6.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 11.0f,
            12.0f, 13.0f, 14.0f,
            15.0f, 16.0f, 17.0f,
        },
        qScale, qOffset));

    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
        {
            0.5f,
        },
        qScale, qOffset));

    LayerTestResult<T, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
        {
             0.5f,  1.5f,  2.5f,
             3.5f,  4.5f,  5.5f,
             6.5f,  7.5f,  8.5f,
             9.5f, 10.5f, 11.5f,
            12.5f, 13.5f, 14.5f,
            15.5f, 16.5f, 17.5f,
        },
        qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

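// Concrete instantiations of the two broadcast tests for Float32, QAsymmU8 and
// QSymmS16, with per-type quantization parameters.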
LayerTestResult<float, 4> AdditionBroadcastTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 2.f, 0);
}

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 2.f, 0);
}

LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.1333333f, 128);
}

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, 0.1333333f, 0);
}

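// Quantized addition with scale 7.0 and offset 3 on all tensors. With the
// asymmetric scheme real = scale * (q - offset), quantized 63 represents
// (63 - 3) * 7 = 420; the inline comments below give these dequantized values.
// Sums whose quantized result would exceed 255 are clamped (uint8 saturation).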
LayerTestResult<uint8_t, 4> AdditionUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<uint8_t> input0(
    {
         63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
        203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
    });

    std::vector<uint8_t> input1(
    {
         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
        126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
    });

    std::vector<uint8_t> output(
    {
         81,  39, 249, 255, 228, 255, //  546,          252, 1722, 2065(clamped), 1575, 2212(clamped)
        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped),      1281, 2408(clamped), 1477
    });

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        3,
        shape1,
        input1,
        7.0f,
        3,
        shape0,
        output,
        7.0f,
        3);
}

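// The same quantized addition using symmetric int16 data (scale 7.0, offset 0),
// so real = 7 * q. The int16 range is wide enough that nothing saturates here.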
LayerTestResult<int16_t, 4> AdditionInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    const unsigned int shape0[] = { 1, 2, 2, 3 };
    const unsigned int shape1[] = { 1, 2, 2, 3 };

    std::vector<int16_t> input0 =
    {
         63,  35,  77,  70,  56, 112, //  441, 245,  539,  490,  392, 784
        203,  28, 252, 168, 245,  91  // 1421, 196, 1764, 1176, 1715, 637
    };

    std::vector<int16_t> input1 =
    {
         21,   7, 175, 231, 175, 210, // 147,   49, 1225, 1617, 1225, 1470
        126, 161,  63,  21, 105, 126  // 882, 1127,  441,  147,  735,  882
    };

    std::vector<int16_t> output =
    {
         84,  42, 252, 301, 231, 322, //  588,  294, 1764, 2107, 1617, 2254
        329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519
    };

    return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        shape0,
        input0,
        7.0f,
        0,
        shape1,
        input1,
        7.0f,
        0,
        shape0,
        output,
        7.0f,
        0);
}

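// Chains two workloads: a 1x1 MaxPool with stride 2x2 over a 3x3 input, whose
// output is then fed into an Addition workload.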
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);

    // Create the initial tensor:
    // 1, 2, 3
    // 4, 5, 6
    // 7, 8, 9

    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::DataType::Float32);
    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> poolingInput = MakeTensor<float, 4>(poolingInputTensorInfo,
        {1, 2, 3,
         4, 5, 6,
         7, 8, 9
        });

    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
        workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
        workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);

    // Apply MaxPool with poolSize = 1x1 and stride = 2x2. Result:
    // 1, 3
    // 7, 9
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolHeight = 1;
    descriptor.m_PoolWidth = 1;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());

    // Create the MaxPool workload.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
    boost::multi_array<float, 4> resultMaxPool;
    resultMaxPool.resize(shape);

    // Create an addition with another tensor of the same size. This would be the
    // result of applying a Conv2d with kernel ones(2) and stride 1x1 to the
    // initial tensor:
    // 12, 16
    // 24, 28

    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);
    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::DataType::Float32);

    boost::multi_array<float, 4> addInput = MakeTensor<float, 4>(addInputTensorInfo,
        {12, 16,
         24, 28,
        });

    // Expected output tensor after MaxPool and Addition.
    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
        {
            13, 19,
            31, 37
        }));

    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add the output of the MaxPool and the new tensor.
    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());

    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);

    poolingInputHandle->Allocate();
    poolingOutputHandle->Allocate();
    addInputHandle->Allocate();
    addOutputHandle->Allocate();

    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());

    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    addWorkload->PostAllocationConfigure();
    addWorkload->Execute();

    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());

    return addRet;
}

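// Runs the same Addition workload on the backend under test and on a reference
// workload factory with identical random inputs; the reference result becomes
// outputExpected for comparison.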
LayerTestResult<float, 4> CompareAdditionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 4;
    unsigned int channels  = 1;
    unsigned int height    = 2;
    unsigned int width     = 3;

    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
    armnn::TensorInfo outputTensorInfo;

    unsigned int shape[] = { batchSize, channels, height, width };

    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);

    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::AdditionQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::AdditionQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);

    inputHandle1->Allocate();
    inputHandle2->Allocate();
    outputHandle->Allocate();
    inputHandle1Ref->Allocate();
    inputHandle2Ref->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}
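
// Usage sketch (an assumption, not part of this file): backend test suites in
// ArmNN typically register these helpers through the ARMNN_AUTO_TEST_CASE
// macro, along the lines of:
//
//     ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
//     ARMNN_AUTO_TEST_CASE(AdditionBroadcast, AdditionBroadcastTest)
//
// The harness executes each function and compares LayerTestResult::output
// against LayerTestResult::outputExpected. Exact macro and test case names may
// differ per backend; treat this as illustrative.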