ArmNN
 21.02
AdditionTestImpl.hpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > AdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 5 > Addition5dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionBroadcastTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionBroadcastInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionBroadcast1ElementInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareAdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 

Function Documentation

◆ Addition5dTest()

LayerTestResult<float, 5> Addition5dTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 92 of file AdditionTestImpl.cpp.

96 {
97  unsigned int depth = 2u;
98  unsigned int batchSize = 2u;
99  unsigned int channels = 2u;
100  unsigned int height = 2u;
101  unsigned int width = 3u;
102 
103  unsigned int shape[] = { depth, batchSize, channels, height, width };
104 
105  std::vector<float> input1 =
106  {
107  2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
108  2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
109 
110  2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
111  0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
112 
113 
114  1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
115  1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
116 
117  0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
118  0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
119 
120  };
121 
122  std::vector<float> input2 =
123  {
124  4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
125  1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
126 
127  4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
128  0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
129 
130 
131  0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
132  2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
133 
134  3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
135  2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
136  };
137 
138  std::vector<float> output =
139  {
140  7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
141  4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
142 
143  7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
144  0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
145 
146 
147  1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
148  3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
149 
150  4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
151  2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
152  };
153 
154  return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
155  workloadFactory,
156  memoryManager,
157  shape,
158  input1,
159  shape,
160  input2,
161  shape,
162  output,
163  tensorHandleFactory);
164 }

◆ AdditionAfterMaxPoolTest()

LayerTestResult<float, 4> AdditionAfterMaxPoolTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 533 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreatePooling2d(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and armnn::Max.

Referenced by BOOST_AUTO_TEST_CASE().

537 {
538  IgnoreUnused(memoryManager);
539 
540  // Create Initial Tensor
541  // 1, 2, 3
542  // 4, 5, 6
543  // 7, 8, 9
544 
545  armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
546  armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
547 
548  boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
549  {1, 2, 3,
550  4, 5, 6,
551  7, 8, 9
552  });
553  std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
554  tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
555  std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
556  tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);
557 
558  // Apply MaxPool poolSize = 1x1, stride=2x2
559  // Result =
560  // 1, 3
561  // 7, 9
562  armnn::Pooling2dDescriptor descriptor;
563  descriptor.m_PoolHeight = 1;
564  descriptor.m_PoolWidth = 1;
565  descriptor.m_StrideX = 2;
566  descriptor.m_StrideY = 2;
567  descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
568 
569  armnn::Pooling2dQueueDescriptor queueDescriptor;
570  queueDescriptor.m_Parameters = descriptor;
571  armnn::WorkloadInfo workloadInfo;
572  AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
573  AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
574 
575  // Create the MaxPool
576  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
577 
578  //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
579  auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
580  boost::multi_array<float, 4> resultMaxPool;
581  resultMaxPool.resize(shape);
582 
583 
584  // Create addition with another tensor the same size
585  // This would be the result to apply a Conv2d with kernel ones(2) and stride 1x1
586  // with the initial tensor.
587  // 12, 16
588  // 24, 28
589 
590  armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
591  armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
592 
593  boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
594  {12, 16,
595  24, 28,
596  });
597 
598  // Expected output tensor after MaxPool and Addition.
599  LayerTestResult<float,4> addRet(addOutputTensorInfo);
600  addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
601  {
602  13, 19,
603  31, 37
604  }));
605 
606  std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
607  std::unique_ptr<armnn::ITensorHandle> addOutputHandle =
608  tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);
609 
609 
610  armnn::AdditionQueueDescriptor data;
611  armnn::WorkloadInfo info;
612 
613  // Add the output of the MaxPool and the new tensor
614  AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
615  AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
616  AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
617 
618  std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
619 
620  poolingInputHandle->Allocate();
621  poolingOutputHandle->Allocate();
622  addInputHandle->Allocate();
623  addOutputHandle->Allocate();
624 
625  CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
626  CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
627 
628  CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
629  CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
630 
631  workload->PostAllocationConfigure();
632  workload->Execute();
633  addWorkload->PostAllocationConfigure();
634  addWorkload->Execute();
635 
636  CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
637 
638  return addRet;
639 }
uint32_t m_PoolWidth
Pooling width value.
void IgnoreUnused(Ts &&...)
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
uint32_t m_PoolHeight
Pooling height value.
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< IWorkload > CreatePooling2d(const Pooling2dQueueDescriptor &descriptor, const WorkloadInfo &info) const
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
A Pooling2dDescriptor for the Pooling2dLayer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)

◆ AdditionBroadcast1ElementInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 383 of file AdditionTestImpl.cpp.

387 {
388  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
389  workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
390 }

◆ AdditionBroadcast1ElementInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 392 of file AdditionTestImpl.cpp.

396 {
397  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
398  workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
399 }

◆ AdditionBroadcast1ElementTest()

LayerTestResult<float, 4> AdditionBroadcast1ElementTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 365 of file AdditionTestImpl.cpp.

369 {
370  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
371  workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
372 }

◆ AdditionBroadcast1ElementUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 374 of file AdditionTestImpl.cpp.

378 {
379  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
380  workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
381 }

◆ AdditionBroadcastInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 347 of file AdditionTestImpl.cpp.

351 {
352  return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
353  workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
354 }

◆ AdditionBroadcastInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 356 of file AdditionTestImpl.cpp.

360 {
361  return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
362  workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
363 }

◆ AdditionBroadcastTest()

LayerTestResult<float, 4> AdditionBroadcastTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 329 of file AdditionTestImpl.cpp.

333 {
334  return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
335  workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
336 }

◆ AdditionBroadcastUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 338 of file AdditionTestImpl.cpp.

342 {
343  return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
344  workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
345 }

◆ AdditionInt16Test()

LayerTestResult<int16_t, 4> AdditionInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 445 of file AdditionTestImpl.cpp.

449 {
450  const unsigned int shape0[] = { 1, 2, 2, 3 };
451  const unsigned int shape1[] = { 1, 2, 2, 3 };
452 
453  std::vector<int16_t> input0 =
454  {
455  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
456  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
457  };
458 
459  std::vector<int16_t> input1 =
460  {
461  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
462  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
463  };
464 
465  std::vector<int16_t> output =
466  {
467  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
468  329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
469  };
470 
471  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
472  workloadFactory,
473  memoryManager,
474  shape0,
475  input0,
476  7.0f,
477  0,
478  shape1,
479  input1,
480  7.0f,
481  0,
482  shape0,
483  output,
484  tensorHandleFactory,
485  7.0f,
486  0);
487 }

◆ AdditionInt32Test()

LayerTestResult<int32_t, 4> AdditionInt32Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 489 of file AdditionTestImpl.cpp.

493 {
494  const unsigned int shape0[] = { 1, 2, 2, 3 };
495  const unsigned int shape1[] = { 1, 2, 2, 3 };
496 
497  std::vector<int32_t> input0 =
498  {
499  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 184
500  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
501  };
502 
503  std::vector<int32_t> input1 =
504  {
505  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
506  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
507  };
508 
509  std::vector<int32_t> output =
510  {
511  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107(clamped), 1617, 2254(clamped)
512  329, 189, 315, 189, 350, 217, // 2303(clamped), 1323, 2205(clamped), 1323, 2450(clamped), 1519
513  };
514 
515  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
516  workloadFactory,
517  memoryManager,
518  shape0,
519  input0,
520  1.0f,
521  0,
522  shape1,
523  input1,
524  1.0f,
525  0,
526  shape0,
527  output,
528  tensorHandleFactory,
529  1.0f,
530  0);
531 }

◆ AdditionTest()

LayerTestResult<float, 4> AdditionTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 22 of file AdditionTestImpl.cpp.

Referenced by armnn::GetVector().

26 {
27  unsigned int batchSize = 2u;
28  unsigned int channels = 2u;
29  unsigned int height = 2u;
30  unsigned int width = 3u;
31 
32  unsigned int shape[] = { batchSize, channels, height, width };
33 
34  std::vector<float> input1 =
35  {
36  0.0f, 2.0f, 1.0f,
37  0.2f, 1.0f, 2.0f,
38 
39  1.0f, 2.0f, 1.0f,
40  0.2f, 1.0f, 2.0f,
41 
42  0.0f, 2.0f, 1.0f,
43  4.2f, 1.0f, 2.0f,
44 
45  0.0f, 0.0f, 1.0f,
46  0.2f, 1.0f, 2.0f,
47  };
48 
49  std::vector<float> input2 =
50  {
51  1.0f, 2.0f, 1.0f,
52  0.0f, 1.0f, 2.0f,
53 
54  1.0f, 2.0f, -2.0f,
55  0.2f, 1.0f, 2.0f,
56 
57  0.0f, 2.0f, 1.0f,
58  4.2f, 0.0f, -3.0f,
59 
60  0.0f, 0.0f, 1.0f,
61  0.7f, 1.0f, 5.0f,
62  };
63 
64 
65  std::vector<float> output
66  {
67  1.0f, 4.0f, 2.0f,
68  0.2f, 2.0f, 4.0f,
69 
70  2.0f, 4.0f, -1.0f,
71  0.4f, 2.0f, 4.0f,
72 
73  0.0f, 4.0f, 2.0f,
74  8.4f, 1.0f, -1.0f,
75 
76  0.0f, 0.0f, 2.0f,
77  0.9f, 2.0f, 7.0f,
78  };
79 
80  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
81  workloadFactory,
82  memoryManager,
83  shape,
84  input1,
85  shape,
86  input2,
87  shape,
88  output,
89  tensorHandleFactory);
90 }

◆ AdditionUint8Test()

LayerTestResult<uint8_t, 4> AdditionUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
const armnn::ITensorHandleFactory tensorHandleFactory 
)

Definition at line 401 of file AdditionTestImpl.cpp.

405 {
406  const unsigned int shape0[] = { 1, 2, 2, 3 };
407  const unsigned int shape1[] = { 1, 2, 2, 3 };
408 
409  std::vector<uint8_t> input0(
410  {
411  63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
412  203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
413  });
414 
415  std::vector<uint8_t> input1(
416  {
417  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
418  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
419  });
420 
421  std::vector<uint8_t> output(
422  {
423  81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
424  255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
425  });
426 
427  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
428  workloadFactory,
429  memoryManager,
430  shape0,
431  input0,
432  7.0f,
433  3,
434  shape1,
435  input1,
436  7.0f,
437  3,
438  shape0,
439  output,
440  tensorHandleFactory,
441  7.0f,
442  3);
443 }

◆ CompareAdditionTest()

LayerTestResult<float, 4> CompareAdditionTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory,
const armnn::ITensorHandleFactory tensorHandleFactory,
const armnn::ITensorHandleFactory refTensorHandleFactory 
)

Definition at line 641 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, and armnn::IgnoreUnused().

647 {
648  IgnoreUnused(memoryManager);
649  unsigned int batchSize = 4;
650  unsigned int channels = 1;
651  unsigned int height = 2;
652  unsigned int width = 3;
653 
654  armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
655  armnn::TensorInfo outputTensorInfo;
656 
657  unsigned int shape[] = {batchSize, channels, height, width};
658 
659  inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
660  inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
661  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
662 
663  auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
664  auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
665 
666  LayerTestResult<float,4> ret(outputTensorInfo);
667 
668  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
669  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
670  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
671 
672  std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
673  std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
674  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
675 
676  armnn::AdditionQueueDescriptor data;
677  armnn::WorkloadInfo info;
678  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
679  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
680  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
681 
682  armnn::AdditionQueueDescriptor refData = data;
683  armnn::WorkloadInfo refInfo = info;
684  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
685  SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
686  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
687 
688  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
689  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
690 
691  inputHandle1->Allocate();
692  inputHandle2->Allocate();
693  outputHandle->Allocate();
694  inputHandle1Ref->Allocate();
695  inputHandle2Ref->Allocate();
696  outputHandleRef->Allocate();
697 
698  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
699  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
700  CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
701  CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
702 
703  workload->PostAllocationConfigure();
704  workload->Execute();
705  workloadRef->PostAllocationConfigure();
706  workloadRef->Execute();
707 
708  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
709  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
710 
711  return ret;
712 }
void IgnoreUnused(Ts &&...)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< IWorkload > CreateAddition(const AdditionQueueDescriptor &descriptor, const WorkloadInfo &info) const
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)