ArmNN 21.08
AdditionTestImpl.cpp File Reference

Go to the source code of this file.

Functions

template<>
std::unique_ptr< armnn::IWorkload > CreateWorkload< armnn::AdditionQueueDescriptor > (const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const armnn::AdditionQueueDescriptor &descriptor)
 
LayerTestResult< float, 4 > AdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 5 > Addition5dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AdditionBroadcastTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AdditionBroadcast1ElementTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionBroadcastTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionBroadcastInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionBroadcast1ElementInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > AdditionUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > AdditionInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int32_t, 4 > AdditionInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareAdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 

Function Documentation

◆ Addition5dTest()

LayerTestResult<float, 5> Addition5dTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 92 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

96 {
97  unsigned int depth = 2u;
98  unsigned int batchSize = 2u;
99  unsigned int channels = 2u;
100  unsigned int height = 2u;
101  unsigned int width = 3u;
102 
103  unsigned int shape[] = { depth, batchSize, channels, height, width };
104 
105  std::vector<float> input1 =
106  {
107  2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
108  2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
109 
110  2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
111  0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
112 
113 
114  1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
115  1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
116 
117  0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
118  0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
119 
120  };
121 
122  std::vector<float> input2 =
123  {
124  4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
125  1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
126 
127  4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
128  0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
129 
130 
131  0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
132  2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
133 
134  3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
135  2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
136  };
137 
138  std::vector<float> output =
139  {
140  7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
141  4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
142 
143  7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
144  0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
145 
146 
147  1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
148  3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
149 
150  4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
151  2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
152  };
153 
154  return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
155  workloadFactory,
156  memoryManager,
157  shape,
158  input1,
159  shape,
160  input2,
161  shape,
162  output,
163  tensorHandleFactory);
164 }
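
Each helper on this page is "Referenced by TEST_SUITE()": a backend's layer-test file pulls it in through a registration macro that supplies the workload factory, memory manager, and tensor handle factory. A hedged sketch of such a registration, assuming the doctest-based ARMNN_AUTO_TEST_CASE_WITH_THF macro used by the backend test suites (suite and case names here are illustrative):

// Illustrative registration in a backend test file (e.g. a reference-backend
// layer-test file); the macro forwards the backend's workload factory,
// memory manager and tensor handle factory into the test function.
TEST_SUITE("ComputeReference")
{
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAdd, AdditionTest)
ARMNN_AUTO_TEST_CASE_WITH_THF(Add5d, Addition5dTest)
}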

◆ AdditionAfterMaxPoolTest()

LayerTestResult<float, 4> AdditionAfterMaxPoolTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 541 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreatePooling2d(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, and armnn::Max.

Referenced by TEST_SUITE().

545 {
546  IgnoreUnused(memoryManager);
547 
548  // Create Initial Tensor
549  // 1, 2, 3
550  // 4, 5, 6
551  // 7, 8, 9
552 
553  armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
554  armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
555 
556  std::vector<float> poolingInput = {1, 2, 3,
557  4, 5, 6,
558  7, 8, 9
559  };
560  std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
561  tensorHandleFactory.CreateTensorHandle(poolingInputTensorInfo);
562  std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
563  tensorHandleFactory.CreateTensorHandle(poolingOutputTensorInfo);
564 
565  // Apply MaxPool poolSize = 1x1, stride=2x2
566  // Result =
567  // 1, 3
568  // 7, 9
569  armnn::Pooling2dDescriptor descriptor;
570  descriptor.m_PoolHeight = 1;
571  descriptor.m_PoolWidth = 1;
572  descriptor.m_StrideX = 2;
573  descriptor.m_StrideY = 2;
574  descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
575 
576  armnn::Pooling2dQueueDescriptor queueDescriptor;
577  queueDescriptor.m_Parameters = descriptor;
578  armnn::WorkloadInfo workloadInfo;
579  AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
580  AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
581 
582  // Create the MaxPool
583  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
584 
585  std::vector<float> resultMaxPool(poolingOutputTensorInfo.GetNumElements());
586 
587  // Create an addition with another tensor of the same size.
588  // This would be the result of applying a Conv2d with kernel ones(2) and stride 1x1
589  // to the initial tensor.
590  // 12, 16
591  // 24, 28
592  armnn::TensorInfo addInputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32);
593  armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2 }, armnn::DataType::Float32);
594 
595  std::vector<float> addInput = { 12, 16,
596  24, 28 };
597 
598  // Expected output tensor after MaxPool and Addition.
599  std::vector<float> actualOutput(addOutputTensorInfo.GetNumElements());
600  std::vector<float> expectedOutput = { 13, 19,
601  31, 37 };
602 
603  std::unique_ptr<armnn::ITensorHandle> addInputHandle = tensorHandleFactory.CreateTensorHandle(addInputTensorInfo);
604  std::unique_ptr<armnn::ITensorHandle> addOutputHandle = tensorHandleFactory.CreateTensorHandle(addOutputTensorInfo);
605 
606  armnn::AdditionQueueDescriptor data;
607  armnn::WorkloadInfo info;
608 
609  // Add the output of the MaxPool and the new tensor
610  AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
611  AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
612  AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
613 
614  std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
615 
616  poolingInputHandle->Allocate();
617  poolingOutputHandle->Allocate();
618  addInputHandle->Allocate();
619  addOutputHandle->Allocate();
620 
621  CopyDataToITensorHandle(poolingInputHandle.get(), poolingInput.data());
622  CopyDataFromITensorHandle(resultMaxPool.data(), poolingOutputHandle.get());
623 
624  CopyDataToITensorHandle(poolingOutputHandle.get(), resultMaxPool.data());
625  CopyDataToITensorHandle(addInputHandle.get(), addInput.data());
626 
627  workload->PostAllocationConfigure();
628  workload->Execute();
629  addWorkload->PostAllocationConfigure();
630  addWorkload->Execute();
631 
632  CopyDataFromITensorHandle(actualOutput.data(), addOutputHandle.get());
633 
634  return LayerTestResult<float, 4>(actualOutput,
635  expectedOutput,
636  addOutputHandle->GetShape(),
637  addOutputTensorInfo.GetShape());
638 }

◆ AdditionBroadcast1ElementInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 391 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

395 {
396  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
397  workloadFactory, memoryManager, 0.1333333f, 0, tensorHandleFactory);
398 }

◆ AdditionBroadcast1ElementInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 400 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

404 {
405  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
406  workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
407 }

◆ AdditionBroadcast1ElementTest()

LayerTestResult<float, 4> AdditionBroadcast1ElementTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 373 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

377 {
378  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
379  workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
380 }

◆ AdditionBroadcast1ElementTestImpl()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 255 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

261 {
262  IgnoreUnused(memoryManager);
263  armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
264  armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
265  armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
266 
267  if (armnn::IsQuantizedType<T>())
268  {
269  inputTensorInfo1.SetQuantizationScale(qScale);
270  inputTensorInfo1.SetQuantizationOffset(qOffset);
271  inputTensorInfo2.SetQuantizationScale(qScale);
272  inputTensorInfo2.SetQuantizationOffset(qOffset);
273  outputTensorInfo.SetQuantizationScale(qScale);
274  outputTensorInfo.SetQuantizationOffset(qOffset);
275  }
276 
277  auto input1 = armnnUtils::QuantizedVector<T>(
278  {
279  0.0f, 1.0f, 2.0f,
280  3.0f, 4.0f, 5.0f,
281  6.0f, 7.0f, 8.0f,
282  9.0f, 10.0f, 11.0f,
283  12.0f, 13.0f, 14.0f,
284  15.0f, 16.0f, 17.0f,
285  },
286  qScale, qOffset);
287 
288  auto input2 = armnnUtils::QuantizedVector<T>(
289  {
290  0.5f,
291  },
292  qScale, qOffset);
293 
294  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
295 
296  auto expectedOutput = armnnUtils::QuantizedVector<T>(
297  {
298  0.5f, 1.5f, 2.5f,
299  3.5f, 4.5f, 5.5f,
300  6.5f, 7.5f, 8.5f,
301  9.5f, 10.5f, 11.5f,
302  12.5f, 13.5f, 14.5f,
303  15.5f, 16.5f, 17.5f,
304  },
305  qScale, qOffset);
306 
307  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
308  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
309  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
310 
311  armnn::AdditionQueueDescriptor data;
312  armnn::WorkloadInfo info;
313  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
314  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
315  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
316 
317  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
318 
319  inputHandle1->Allocate();
320  inputHandle2->Allocate();
321  outputHandle->Allocate();
322 
323  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
324  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
325 
326  workload->PostAllocationConfigure();
327  workload->Execute();
328 
329  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
330 
331  return LayerTestResult<T, 4>(actualOutput,
332  expectedOutput,
333  outputHandle->GetShape(),
334  outputTensorInfo.GetShape());
335 }

◆ AdditionBroadcast1ElementUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 382 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

386 {
387  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
388  workloadFactory, memoryManager, 0.1333333f, 128, tensorHandleFactory);
389 }

◆ AdditionBroadcastInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 355 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

359 {
360  return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
361  workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
362 }

◆ AdditionBroadcastInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 364 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

368 {
369  return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
370  workloadFactory, memoryManager, 1.f, 0, tensorHandleFactory);
371 }

◆ AdditionBroadcastTest()

LayerTestResult<float, 4> AdditionBroadcastTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 337 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

341 {
342  return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
343  workloadFactory, memoryManager, 0.0f, 0, tensorHandleFactory);
344 }

◆ AdditionBroadcastTestImpl()

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
float  qScale,
int32_t  qOffset,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 167 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), ITensorHandleFactory::CreateTensorHandle(), armnn::IgnoreUnused(), TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

173 {
174  IgnoreUnused(memoryManager);
175  armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
176  armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
177  armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
178 
179  if (armnn::IsQuantizedType<T>())
180  {
181  inputTensorInfo1.SetQuantizationScale(qScale);
182  inputTensorInfo1.SetQuantizationOffset(qOffset);
183  inputTensorInfo2.SetQuantizationScale(qScale);
184  inputTensorInfo2.SetQuantizationOffset(qOffset);
185  outputTensorInfo.SetQuantizationScale(qScale);
186  outputTensorInfo.SetQuantizationOffset(qOffset);
187  }
188 
189  auto input1 = armnnUtils::QuantizedVector<T>(
190  {
191  0.0f,
192  1.0f,
193 
194  2.0f,
195  3.0f,
196 
197  4.0f,
198  5.0f,
199  },
200  qScale, qOffset);
201 
202  auto input2 = armnnUtils::QuantizedVector<T>(
203  {
204  0.5f, 1.5f, 2.5f,
205  3.5f, 4.5f, 5.5f,
206  },
207  qScale, qOffset);
208 
209  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
210 
211  auto expectedOutput = armnnUtils::QuantizedVector<T>(
212  {
213  0.5f, 1.5f, 2.5f,
214  4.5f, 5.5f, 6.5f,
215 
216  2.5f, 3.5f, 4.5f,
217  6.5f, 7.5f, 8.5f,
218 
219  4.5f, 5.5f, 6.5f,
220  8.5f, 9.5f, 10.5f,
221  },
222  qScale, qOffset);
223 
224  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
225  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
226  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
227 
228  armnn::AdditionQueueDescriptor data;
229  armnn::WorkloadInfo info;
230  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
231  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
232  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
233 
234  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
235 
236  inputHandle1->Allocate();
237  inputHandle2->Allocate();
238  outputHandle->Allocate();
239 
240  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
241  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
242 
243  workload->PostAllocationConfigure();
244  workload->Execute();
245 
246  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
247 
248  return LayerTestResult<T, 4>(actualOutput,
249  expectedOutput,
250  outputHandle->GetShape(),
251  outputTensorInfo.GetShape());
252 }

◆ AdditionBroadcastUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 346 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

350 {
351  return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
352  workloadFactory, memoryManager, 2.f, 0, tensorHandleFactory);
353 }

◆ AdditionInt16Test()

LayerTestResult<int16_t, 4> AdditionInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 453 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

457 {
458  const unsigned int shape0[] = { 1, 2, 2, 3 };
459  const unsigned int shape1[] = { 1, 2, 2, 3 };
460 
461  std::vector<int16_t> input0 =
462  {
463  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
464  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
465  };
466 
467  std::vector<int16_t> input1 =
468  {
469  21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
470  126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
471  };
472 
473  std::vector<int16_t> output =
474  {
475  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
476  329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519 (no clamping for QSymmS16)
477  };
478 
479  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
480  workloadFactory,
481  memoryManager,
482  shape0,
483  input0,
484  7.0f,
485  0,
486  shape1,
487  input1,
488  7.0f,
489  0,
490  shape0,
491  output,
492  tensorHandleFactory,
493  7.0f,
494  0);
495 }

◆ AdditionInt32Test()

LayerTestResult<int32_t, 4> AdditionInt32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 497 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

501 {
502  const unsigned int shape0[] = { 1, 2, 2, 3 };
503  const unsigned int shape1[] = { 1, 2, 2, 3 };
504 
505  std::vector<int32_t> input0 =
506  {
507  63, 35, 77, 70, 56, 112, // quantization scale 1.0, offset 0: values are used as-is
508  203, 28, 252, 168, 245, 91
509  };
510 
511  std::vector<int32_t> input1 =
512  {
513  21, 7, 175, 231, 175, 210,
514  126, 161, 63, 21, 105, 126
515  };
516 
517  std::vector<int32_t> output =
518  {
519  84, 42, 252, 301, 231, 322, // exact element-wise sums; no clamping occurs for Signed32
520  329, 189, 315, 189, 350, 217,
521  };
522 
523  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
524  workloadFactory,
525  memoryManager,
526  shape0,
527  input0,
528  1.0f,
529  0,
530  shape1,
531  input1,
532  1.0f,
533  0,
534  shape0,
535  output,
536  tensorHandleFactory,
537  1.0f,
538  0);
539 }

◆ AdditionTest()

LayerTestResult<float, 4> AdditionTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 22 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

26 {
27  unsigned int batchSize = 2u;
28  unsigned int channels = 2u;
29  unsigned int height = 2u;
30  unsigned int width = 3u;
31 
32  unsigned int shape[] = { batchSize, channels, height, width };
33 
34  std::vector<float> input1 =
35  {
36  0.0f, 2.0f, 1.0f,
37  0.2f, 1.0f, 2.0f,
38 
39  1.0f, 2.0f, 1.0f,
40  0.2f, 1.0f, 2.0f,
41 
42  0.0f, 2.0f, 1.0f,
43  4.2f, 1.0f, 2.0f,
44 
45  0.0f, 0.0f, 1.0f,
46  0.2f, 1.0f, 2.0f,
47  };
48 
49  std::vector<float> input2 =
50  {
51  1.0f, 2.0f, 1.0f,
52  0.0f, 1.0f, 2.0f,
53 
54  1.0f, 2.0f, -2.0f,
55  0.2f, 1.0f, 2.0f,
56 
57  0.0f, 2.0f, 1.0f,
58  4.2f, 0.0f, -3.0f,
59 
60  0.0f, 0.0f, 1.0f,
61  0.7f, 1.0f, 5.0f,
62  };
63 
64 
65  std::vector<float> output
66  {
67  1.0f, 4.0f, 2.0f,
68  0.2f, 2.0f, 4.0f,
69 
70  2.0f, 4.0f, -1.0f,
71  0.4f, 2.0f, 4.0f,
72 
73  0.0f, 4.0f, 2.0f,
74  8.4f, 1.0f, -1.0f,
75 
76  0.0f, 0.0f, 2.0f,
77  0.9f, 2.0f, 7.0f,
78  };
79 
80  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
81  workloadFactory,
82  memoryManager,
83  shape,
84  input1,
85  shape,
86  input2,
87  shape,
88  output,
89  tensorHandleFactory);
90 }

◆ AdditionUint8Test()

LayerTestResult<uint8_t, 4> AdditionUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 409 of file AdditionTestImpl.cpp.

Referenced by TEST_SUITE().

413 {
414  const unsigned int shape0[] = { 1, 2, 2, 3 };
415  const unsigned int shape1[] = { 1, 2, 2, 3 };
416 
417  std::vector<uint8_t> input0(
418  {
419  63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
420  203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
421  });
422 
423  std::vector<uint8_t> input1(
424  {
425  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
426  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
427  });
428 
429  std::vector<uint8_t> output(
430  {
431  81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
432  255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
433  });
434 
435  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
436  workloadFactory,
437  memoryManager,
438  shape0,
439  input0,
440  7.0f,
441  3,
442  shape1,
443  input1,
444  7.0f,
445  3,
446  shape0,
447  output,
448  tensorHandleFactory,
449  7.0f,
450  3);
451 }
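
The bracketed comments above are the dequantized values, following the usual asymmetric rule real = scale * (q - offset) with scale 7.0 and offset 3 for both inputs and the output; sums that would requantize above 255 saturate. A small sketch of that round trip (the helper name is ours, not ArmNN's):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize both operands, add, then requantize with saturation to uint8.
uint8_t AddQAsymmU8(uint8_t a, uint8_t b, float scale, int32_t offset)
{
    float real = scale * (a - offset) + scale * (b - offset);         // dequantized sum
    int32_t q  = static_cast<int32_t>(std::round(real / scale)) + offset;
    return static_cast<uint8_t>(std::clamp(q, 0, 255));               // saturate
}
// Example: AddQAsymmU8(70, 231, 7.0f, 3) -> 469 + 1596 = 2065 -> q = 298 -> 255.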

◆ CompareAdditionTest()

LayerTestResult<float, 4> CompareAdditionTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::ITensorHandleFactory & tensorHandleFactory,
const armnn::ITensorHandleFactory & refTensorHandleFactory 
)

Definition at line 640 of file AdditionTestImpl.cpp.

References CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, TensorInfo::GetNumElements(), TensorInfo::GetShape(), and armnn::IgnoreUnused().

Referenced by TEST_SUITE().

646 {
647  IgnoreUnused(memoryManager);
648  unsigned int batchSize = 4;
649  unsigned int channels = 1;
650  unsigned int height = 2;
651  unsigned int width = 3;
652 
653  armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
654  armnn::TensorInfo outputTensorInfo;
655 
656  unsigned int shape[] = {batchSize, channels, height, width};
657 
658  inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
659  inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
660  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
661 
662  auto input1 = MakeRandomTensor<float>(inputTensorInfo1, 1232);
663  auto input2 = MakeRandomTensor<float>(inputTensorInfo2, 456);
664 
665  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
666  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
667 
668  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
669  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
670  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
671 
672  std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
673  std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo2);
674  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
675 
676  armnn::AdditionQueueDescriptor data;
677  armnn::WorkloadInfo info;
678  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
679  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
680  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
681 
682  armnn::AdditionQueueDescriptor refData = data;
683  armnn::WorkloadInfo refInfo = info;
684  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
685  SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
686  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
687 
688  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
689  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
690 
691  inputHandle1->Allocate();
692  inputHandle2->Allocate();
693  outputHandle->Allocate();
694  inputHandle1Ref->Allocate();
695  inputHandle2Ref->Allocate();
696  outputHandleRef->Allocate();
697 
698  CopyDataToITensorHandle(inputHandle1.get(), input1.data());
699  CopyDataToITensorHandle(inputHandle2.get(), input2.data());
700  CopyDataToITensorHandle(inputHandle1Ref.get(), input1.data());
701  CopyDataToITensorHandle(inputHandle2Ref.get(), input2.data());
702 
703  workload->PostAllocationConfigure();
704  workload->Execute();
705  workloadRef->PostAllocationConfigure();
706  workloadRef->Execute();
707 
708  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
709  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
710 
711  return LayerTestResult<float, 4>(actualOutput,
712  expectedOutput,
713  outputHandle->GetShape(),
714  outputTensorInfo.GetShape());
715 }

◆ CreateWorkload< armnn::AdditionQueueDescriptor >()

template<>
std::unique_ptr<armnn::IWorkload> CreateWorkload< armnn::AdditionQueueDescriptor > ( const armnn::IWorkloadFactory & workloadFactory,
const armnn::WorkloadInfo & info,
const armnn::AdditionQueueDescriptor & descriptor 
)

Definition at line 14 of file AdditionTestImpl.cpp.

18 {
19  return workloadFactory.CreateAddition(descriptor, info);
20 }