ArmNN
 22.05
AdditionTestImpl.hpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > AdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 5 > Addition5dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionBroadcastTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionBroadcastInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionBroadcast1ElementInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareAdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 

Function Documentation

◆ Addition5dTest()

LayerTestResult<float, 5> Addition5dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 92 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

96 {
97  unsigned int depth = 2u;
98  unsigned int batchSize = 2u;
99  unsigned int channels = 2u;
100  unsigned int height = 2u;
101  unsigned int width = 3u;
102 
103  unsigned int shape[] = { depth, batchSize, channels, height, width };
104 
105  std::vector<float> input1 =
106  {
107  2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
108  2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
109 
110  2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
111  0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
112 
113 
114  1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
115  1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
116 
117  0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
118  0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
119 
120  };
121 
122  std::vector<float> input2 =
123  {
124  4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
125  1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
126 
127  4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
128  0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
129 
130 
131  0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
132  2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
133 
134  3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
135  2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
136  };
137 
138  std::vector<float> output =
139  {
140  7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
141  4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
142 
143  7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
144  0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
145 
146 
147  1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
148  3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
149 
150  4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
151  2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
152  };
153 
154  return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
155  workloadFactory,
156  memoryManager,
157  shape,
158  input1,
159  shape,
160  input2,
161  shape,
162  output,
163  tensorHandleFactory);
164 }

◆ AdditionAfterMaxPoolTest()

LayerTestResult<float, 4> AdditionAfterMaxPoolTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 543 of file AdditionTestImpl.cpp.

References armnn::Addition, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::Float32, armnn::IgnoreUnused(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::Max, and armnn::Pooling2d.

Referenced by TEST_SUITE().

547 {
548  IgnoreUnused(memoryManager);
549 
550  // Create Initial Tensor
551  // 1, 2, 3
552  // 4, 5, 6
553  // 7, 8, 9
554 
555  armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
556  armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
557 
558  std::vector<float> poolingInput = {1, 2, 3,
559  4, 5, 6,
560  7, 8, 9
561  };
562  std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
563  tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
564  std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
565  tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);
566 
567  // Apply MaxPool poolSize = 1x1, stride=2x2
568  // Result =
569  // 1, 3
570  // 7, 9
571  armnn::Pooling2dDescriptor descriptor;
572  descriptor.m_PoolHeight = 1;
573  descriptor.m_PoolWidth = 1;
574  descriptor.m_StrideX = 2;
575  descriptor.m_StrideY = 2;
576  descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
577 
578  armnn::Pooling2dQueueDescriptor queueDescriptor;
579  queueDescriptor.m_Parameters = descriptor;
580  armnn::WorkloadInfo workloadInfo;
581  AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
582  AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
583 
584  // Create the MaxPool
585  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Pooling2d,
586  queueDescriptor,
587  workloadInfo);
588 
589  std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());
590 
591  // Create addition with another tensor the same size
592  // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
593  // with the initial tensor.
594  // 12, 16
595  // 24, 28
596  armnn::TensorInfo addInputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32);
597  armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32);
598 
599  std::vector<float> addInput = { 12, 16,
600  24, 28 };
601 
602  // Expected output tensor after MaxPool and Addition.
603  std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
604  std::vector<float> expectedOutput = { 13, 19,
605  31, 37 };
606 
607  std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
608  std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);
609 
609 
610  armnn::AdditionQueueDescriptor data;
611  armnn::WorkloadInfo info;
612 
613  // Add the output of the MaxPool and the new tensor
614  AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
615  AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
616  AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
617 
618  std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
619  data, info);
620 
621  poolingInputHandle->Allocate();
622  poolingOutputHandle->Allocate();
623  addInputHandle->Allocate();
624  addOutputHandle->Allocate();
625 
626  CopyDataToITensorHandle(poolingInputHandle.get(), poolingInput.data());
627  CopyDataFromITensorHandle(resultMaxPool.data(), poolingOutputHandle.get());
628 
629  CopyDataToITensorHandle(poolingOutputHandle.get(), resultMaxPool.data());
630  CopyDataToITensorHandle(addInputHandle.get(), addInput.data());
631 
632  workload->PostAllocationConfigure();
633  workload->Execute();
634  addWorkload->PostAllocationConfigure();
635  addWorkload->Execute();
636 
637  CopyDataFromITensorHandle(actualOutput.data(), addOutputHandle.get());
638 
639  return LayerTestResult<float, 4>(actualOutput,
640  expectedOutput,
641  addOutputHandle->GetShape(),
642  addOutputTensorInfo.GetShape());
643 }
uint32_t m_PoolWidth
Pooling width value.
void IgnoreUnused(Ts &&...)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
A Pooling2dDescriptor for the Pooling2dLayer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.

◆ AdditionBroadcast1ElementInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 393 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

397 {
398  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
399  workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
400 }

◆ AdditionBroadcast1ElementInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 402 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

406 {
407  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
408  workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
409 }

◆ AdditionBroadcast1ElementTest()

LayerTestResult<float, 4> AdditionBroadcast1ElementTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 375 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

379 {
380  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
381  workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
382 }

◆ AdditionBroadcast1ElementUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 384 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

388 {
389  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
390  workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
391 }

◆ AdditionBroadcastInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 357 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

361 {
362  return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
363  workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
364 }

◆ AdditionBroadcastInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 366 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

370 {
371  return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
372  workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
373 }

◆ AdditionBroadcastTest()

LayerTestResult<float, 4> AdditionBroadcastTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 339 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

343 {
344  return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
345  workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
346 }

◆ AdditionBroadcastUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 348 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

352 {
353  return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
354  workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
355 }

◆ AdditionInt16Test()

LayerTestResult<int16_t, 4> AdditionInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 455 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

459 {
460  const unsigned int shape0[] = { 1, 2, 2, 3 };
461  const unsigned int shape1[] = { 1, 2, 2, 3 };
462 
463  std::vector<int16_t> input0 =
464  {
465  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
466  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
467  };
468 
469  std::vector<int16_t> input1 =
470  {
471  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
472  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
473  };
474 
475  std::vector<int16_t> output =
476  {
477  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
478  329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
479  };
480 
481  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
482  workloadFactory,
483  memoryManager,
484  shape0,
485  input0,
486  7.0f,
487  0,
488  shape1,
489  input1,
490  7.0f,
491  0,
492  shape0,
493  output,
494  tensorHandleFactory,
495  7.0f,
496  0);
497 }

◆ AdditionInt32Test()

LayerTestResult<int32_t, 4> AdditionInt32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 499 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

503 {
504  const unsigned int shape0[] = { 1, 2, 2, 3 };
505  const unsigned int shape1[] = { 1, 2, 2, 3 };
506 
507  std::vector<int32_t> input0 =
508  {
509  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
510  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
511  };
512 
513  std::vector<int32_t> input1 =
514  {
515  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
516  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
517  };
518 
519  std::vector<int32_t> output =
520  {
521  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
522  329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
523  };
524 
525  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
526  workloadFactory,
527  memoryManager,
528  shape0,
529  input0,
530  1.0f,
531  0,
532  shape1,
533  input1,
534  1.0f,
535  0,
536  shape0,
537  output,
538  tensorHandleFactory,
539  1.0f,
540  0);
541 }

◆ AdditionTest()

LayerTestResult<float, 4> AdditionTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 22 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

26 {
27  unsigned int batchSize = 2u;
28  unsigned int channels = 2u;
29  unsigned int height = 2u;
30  unsigned int width = 3u;
31 
32  unsigned int shape[] = { batchSize, channels, height, width };
33 
34  std::vector<float> input1 =
35  {
36  0.0f, 2.0f, 1.0f,
37  0.2f, 1.0f, 2.0f,
38 
39  1.0f, 2.0f, 1.0f,
40  0.2f, 1.0f, 2.0f,
41 
42  0.0f, 2.0f, 1.0f,
43  4.2f, 1.0f, 2.0f,
44 
45  0.0f, 0.0f, 1.0f,
46  0.2f, 1.0f, 2.0f,
47  };
48 
49  std::vector<float> input2 =
50  {
51  1.0f, 2.0f, 1.0f,
52  0.0f, 1.0f, 2.0f,
53 
54  1.0f, 2.0f, -2.0f,
55  0.2f, 1.0f, 2.0f,
56 
57  0.0f, 2.0f, 1.0f,
58  4.2f, 0.0f, -3.0f,
59 
60  0.0f, 0.0f, 1.0f,
61  0.7f, 1.0f, 5.0f,
62  };
63 
64 
65  std::vector<float> output
66  {
67  1.0f, 4.0f, 2.0f,
68  0.2f, 2.0f, 4.0f,
69 
70  2.0f, 4.0f, -1.0f,
71  0.4f, 2.0f, 4.0f,
72 
73  0.0f, 4.0f, 2.0f,
74  8.4f, 1.0f, -1.0f,
75 
76  0.0f, 0.0f, 2.0f,
77  0.9f, 2.0f, 7.0f,
78  };
79 
80  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
81  workloadFactory,
82  memoryManager,
83  shape,
84  input1,
85  shape,
86  input2,
87  shape,
88  output,
89  tensorHandleFactory);
90 }

◆ AdditionUint8Test()

LayerTestResult<uint8_t, 4> AdditionUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 411 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

415 {
416  const unsigned int shape0[] = { 1, 2, 2, 3 };
417  const unsigned int shape1[] = { 1, 2, 2, 3 };
418 
419  std::vector<uint8_t> input0(
420  {
421  63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
422  203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
423  });
424 
425  std::vector<uint8_t> input1(
426  {
427  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
428  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
429  });
430 
431  std::vector<uint8_t> output(
432  {
433  81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
434  255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
435  });
436 
437  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
438  workloadFactory,
439  memoryManager,
440  shape0,
441  input0,
442  7.0f,
443  3,
444  shape1,
445  input1,
446  7.0f,
447  3,
448  shape0,
449  output,
450  tensorHandleFactory,
451  7.0f,
452  3);
453 }

◆ CompareAdditionTest()

LayerTestResult<float, 4> CompareAdditionTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::ITensorHandleFactory & tensorHandleFactory,
const armnn::ITensorHandleFactory & refTensorHandleFactory 
)

Definition at line 645 of file AdditionTestImpl.cpp.

References armnn::Addition, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::Float32, TensorInfo::GetNumElements(), TensorInfo::GetShape(), and armnn::IgnoreUnused().

Referenced by TEST_SUITE().

651 {
652  IgnoreUnused(memoryManager);
653  unsigned int batchSize = 4;
654  unsigned int channels = 1;
655  unsigned int height = 2;
656  unsigned int width = 3;
657 
658  armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
659  armnn::TensorInfo outputTensorInfo;
660 
661  unsigned int shape[] = {batchSize, channels, height, width};
662 
663  inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
664  inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
665  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
666 
667  auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
668  auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);
669 
670  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
671  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
672 
673  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
674  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
675  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
676 
677  std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
678  std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
679  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
680 
681  armnn::AdditionQueueDescriptor data;
682  armnn::WorkloadInfo info;
683  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
684  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
685  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
686 
687  armnn::AdditionQueueDescriptor refData = data;
688  armnn::WorkloadInfo refInfo = info;
689  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
690  SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
691  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
692 
693  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Addition,
694  data, info);
695  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Addition,
696  refData, refInfo);
697 
698  inputHandle1->Allocate();
699  inputHandle2->Allocate();
700  outputHandle->Allocate();
701  inputHandle1Ref->Allocate();
702  inputHandle2Ref->Allocate();
703  outputHandleRef->Allocate();
704 
705  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
706  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
707  CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());
708  CopyDataToITensorHandle(inputHandle2Ref.get(), input2.data());
709 
710  workload->PostAllocationConfigure();
711  workload->Execute();
712  workloadRef->PostAllocationConfigure();
713  workloadRef->Execute();
714 
715  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
716  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
717 
718  return LayerTestResult<float, 4>(actualOutput,
719  expectedOutput,
720  outputHandle->GetShape(),
721  outputTensorInfo.GetShape());
722 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
void IgnoreUnused(Ts &&...)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
Definition: Tensor.hpp:196