ArmNN  NotReleased
AdditionTestImpl.hpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > AdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 5 > Addition5dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > AdditionBroadcastTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AdditionUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AdditionInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > CompareAdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory)
 

Function Documentation

◆ Addition5dTest()

LayerTestResult<float, 5> Addition5dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 89 of file AdditionTestImpl.cpp.

92 {
93  unsigned int depth = 2u;
94  unsigned int batchSize = 2u;
95  unsigned int channels = 2u;
96  unsigned int height = 2u;
97  unsigned int width = 3u;
98 
99  unsigned int shape[] = { depth, batchSize, channels, height, width };
100 
101  std::vector<float> input1 =
102  {
103  2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
104  2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
105 
106  2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
107  0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
108 
109 
110  1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
111  1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
112 
113  0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
114  0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
115 
116  };
117 
118  std::vector<float> input2 =
119  {
120  4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
121  1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
122 
123  4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
124  0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
125 
126 
127  0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
128  2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
129 
130  3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
131  2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
132  };
133 
134  std::vector<float> output =
135  {
136  7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
137  4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
138 
139  7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
140  0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
141 
142 
143  1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
144  3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
145 
146  4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
147  2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
148  };
149 
150  return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
151  workloadFactory,
152  memoryManager,
153  shape,
154  input1,
155  shape,
156  input2,
157  shape,
158  output);
159 }

◆ AdditionAfterMaxPoolTest()

LayerTestResult<float, 4> AdditionAfterMaxPoolTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 454 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreatePooling2d(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and armnn::Max.

Referenced by BOOST_AUTO_TEST_CASE().

457 {
458  boost::ignore_unused(memoryManager);
459 
460  // Create Initial Tensor
461  // 1, 2, 3
462  // 4, 5, 6
463  // 7, 8, 9
464 
465  armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
466  armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
467 
468  boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
469  {1, 2, 3,
470  4, 5, 6,
471  7, 8, 9
472  });
473 
474  std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
475  workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
476  std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
477  workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
478 
479  // Apply MaxPool poolSize = 1x1, stride=2x2
480  // Result =
481  // 1, 3
482  // 7, 9
483  armnn::Pooling2dDescriptor descriptor;
484  descriptor.m_PoolHeight = 1;
485  descriptor.m_PoolWidth = 1;
486  descriptor.m_StrideX = 2;
487  descriptor.m_StrideY = 2;
488  descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
489 
490  armnn::Pooling2dQueueDescriptor queueDescriptor;
491  queueDescriptor.m_Parameters = descriptor;
492  armnn::WorkloadInfo workloadInfo;
493  AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
494  AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
495 
496  // Create the MaxPool
497  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
498 
499  //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
500  auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
501  boost::multi_array<float, 4> resultMaxPool;
502  resultMaxPool.resize(shape);
503 
504 
505  // Create addition with another tensor the same size
506  // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
507  // with the initial tensor.
508  // 12, 16
509  // 24, 28
510 
511  armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
512  armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
513 
514  boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
515  {12, 16,
516  24, 28,
517  });
518 
519  // Expected output tensor after MaxPool and Addition.
520  LayerTestResult<float,4> addRet(addOutputTensorInfo);
521  addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
522  {
523  13, 19,
524  31, 37
525  }));
526 
527  std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
528  std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
529 
530  armnn::AdditionQueueDescriptor data;
531  armnn::WorkloadInfo info;
532 
533  // Add the output of the MaxPool and the new tensor
534  AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
535  AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
536  AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
537 
538  std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
539 
540  poolingInputHandle->Allocate();
541  poolingOutputHandle->Allocate();
542  addInputHandle->Allocate();
543  addOutputHandle->Allocate();
544 
545  CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
546  CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
547 
548  CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
549  CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
550 
551  workload->PostAllocationConfigure();
552  workload->Execute();
553  addWorkload->PostAllocationConfigure();
554  addWorkload->Execute();
555 
556  CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
557 
558  return addRet;
559 }
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
uint32_t m_PoolHeight
Pooling height value.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
uint32_t m_PoolWidth
Pooling width value.
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
A Pooling2dDescriptor for the Pooling2dLayer.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.

◆ AdditionBroadcast1ElementInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 362 of file AdditionTestImpl.cpp.

365 {
366  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
367  workloadFactory, memoryManager, 0.1333333f, 0);
368 }

◆ AdditionBroadcast1ElementTest()

LayerTestResult<float, 4> AdditionBroadcast1ElementTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 346 of file AdditionTestImpl.cpp.

349 {
350  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
351  workloadFactory, memoryManager, 0.0f, 0);
352 }

◆ AdditionBroadcast1ElementUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 354 of file AdditionTestImpl.cpp.

357 {
358  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
359  workloadFactory, memoryManager, 0.1333333f, 128);
360 }

◆ AdditionBroadcastInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 338 of file AdditionTestImpl.cpp.

341 {
342  return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
343  workloadFactory, memoryManager, 2.f, 0);
344 }

◆ AdditionBroadcastTest()

LayerTestResult<float, 4> AdditionBroadcastTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 322 of file AdditionTestImpl.cpp.

325 {
326  return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
327  workloadFactory, memoryManager, 0.0f, 0);
328 }

◆ AdditionBroadcastUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 330 of file AdditionTestImpl.cpp.

333 {
334  return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
335  workloadFactory, memoryManager, 2.f, 0);
336 }

◆ AdditionInt16Test()

LayerTestResult<int16_t, 4> AdditionInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 412 of file AdditionTestImpl.cpp.

415 {
416  const unsigned int shape0[] = { 1, 2, 2, 3 };
417  const unsigned int shape1[] = { 1, 2, 2, 3 };
418 
419  std::vector<int16_t> input0 =
420  {
421  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
422  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
423  };
424 
425  std::vector<int16_t> input1 =
426  {
427  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
428  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
429  };
430 
431  std::vector<int16_t> output =
432  {
433  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
434  329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
435  };
436 
437  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
438  workloadFactory,
439  memoryManager,
440  shape0,
441  input0,
442  7.0f,
443  0,
444  shape1,
445  input1,
446  7.0f,
447  0,
448  shape0,
449  output,
450  7.0f,
451  0);
452 }

◆ AdditionTest()

LayerTestResult<float, 4> AdditionTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 21 of file AdditionTestImpl.cpp.

24 {
25  unsigned int batchSize = 2u;
26  unsigned int channels = 2u;
27  unsigned int height = 2u;
28  unsigned int width = 3u;
29 
30  unsigned int shape[] = { batchSize, channels, height, width };
31 
32  std::vector<float> input1 =
33  {
34  0.0f, 2.0f, 1.0f,
35  0.2f, 1.0f, 2.0f,
36 
37  1.0f, 2.0f, 1.0f,
38  0.2f, 1.0f, 2.0f,
39 
40  0.0f, 2.0f, 1.0f,
41  4.2f, 1.0f, 2.0f,
42 
43  0.0f, 0.0f, 1.0f,
44  0.2f, 1.0f, 2.0f,
45  };
46 
47  std::vector<float> input2 =
48  {
49  1.0f, 2.0f, 1.0f,
50  0.0f, 1.0f, 2.0f,
51 
52  1.0f, 2.0f, -2.0f,
53  0.2f, 1.0f, 2.0f,
54 
55  0.0f, 2.0f, 1.0f,
56  4.2f, 0.0f, -3.0f,
57 
58  0.0f, 0.0f, 1.0f,
59  0.7f, 1.0f, 5.0f,
60  };
61 
62 
63  std::vector<float> output
64  {
65  1.0f, 4.0f, 2.0f,
66  0.2f, 2.0f, 4.0f,
67 
68  2.0f, 4.0f, -1.0f,
69  0.4f, 2.0f, 4.0f,
70 
71  0.0f, 4.0f, 2.0f,
72  8.4f, 1.0f, -1.0f,
73 
74  0.0f, 0.0f, 2.0f,
75  0.9f, 2.0f, 7.0f,
76  };
77 
78  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
79  workloadFactory,
80  memoryManager,
81  shape,
82  input1,
83  shape,
84  input2,
85  shape,
86  output);
87 }

◆ AdditionUint8Test()

LayerTestResult<uint8_t, 4> AdditionUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 370 of file AdditionTestImpl.cpp.

373 {
374  const unsigned int shape0[] = { 1, 2, 2, 3 };
375  const unsigned int shape1[] = { 1, 2, 2, 3 };
376 
377  std::vector<uint8_t> input0(
378  {
379  63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
380  203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
381  });
382 
383  std::vector<uint8_t> input1(
384  {
385  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
386  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
387  });
388 
389  std::vector<uint8_t> output(
390  {
391  81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
392  255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
393  });
394 
395  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
396  workloadFactory,
397  memoryManager,
398  shape0,
399  input0,
400  7.0f,
401  3,
402  shape1,
403  input1,
404  7.0f,
405  3,
406  shape0,
407  output,
408  7.0f,
409  3);
410 }

◆ CompareAdditionTest()

LayerTestResult<float, 4> CompareAdditionTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory 
)

Definition at line 561 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreateTensorHandle(), and armnn::Float32.

565 {
566  boost::ignore_unused(memoryManager);
567  unsigned int batchSize = 4;
568  unsigned int channels = 1;
569  unsigned int height = 2;
570  unsigned int width = 3;
571 
572  armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
573  armnn::TensorInfo outputTensorInfo;
574 
575  unsigned int shape[] = {batchSize, channels, height, width};
576 
577  inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
578  inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
579  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
580 
581  auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
582  auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
583 
584  LayerTestResult<float,4> ret(outputTensorInfo);
585 
586  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
587  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
588  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
589 
590  std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
591  std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
592  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
593 
594  armnn::AdditionQueueDescriptor data;
595  armnn::WorkloadInfo info;
596  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
597  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
598  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
599 
600  armnn::AdditionQueueDescriptor refData = data;
601  armnn::WorkloadInfo refInfo = info;
602  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
603  SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
604  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
605 
606  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
607  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
608 
609  inputHandle1->Allocate();
610  inputHandle2->Allocate();
611  outputHandle->Allocate();
612  inputHandle1Ref->Allocate();
613  inputHandle2Ref->Allocate();
614  outputHandleRef->Allocate();
615 
616  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
617  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
618  CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
619  CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
620 
621  workload->PostAllocationConfigure();
622  workload->Execute();
623  workloadRef->PostAllocationConfigure();
624  workloadRef->Execute();
625 
626  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
627  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
628 
629  return ret;
630 }
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const