ArmNN 20.08
AdditionTestImpl.cpp File Reference

Go to the source code of this file.

Functions

template<>
std::unique_ptr< armnn::IWorkload > CreateWorkload< armnn::AdditionQueueDescriptor > (const armnn::IWorkloadFactory &workloadFactory, const armnn::WorkloadInfo &info, const armnn::AdditionQueueDescriptor &descriptor)
 
LayerTestResult< float, 4 > AdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 5 > Addition5dTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AdditionBroadcastTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult< T, 4 > AdditionBroadcast1ElementTestImpl (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, float qScale, int32_t qOffset)
 
LayerTestResult< float, 4 > AdditionBroadcastTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcastUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AdditionBroadcastInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int32_t, 4 > AdditionBroadcastInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > AdditionBroadcast1ElementTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AdditionBroadcast1ElementUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AdditionBroadcast1ElementInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int32_t, 4 > AdditionBroadcast1ElementInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > AdditionUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > AdditionInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int32_t, 4 > AdditionInt32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > AdditionAfterMaxPoolTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > CompareAdditionTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory)
 

Function Documentation

◆ Addition5dTest()

LayerTestResult<float, 5> Addition5dTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 89 of file AdditionTestImpl.cpp.

92 {
93  unsigned int depth = 2u;
94  unsigned int batchSize = 2u;
95  unsigned int channels = 2u;
96  unsigned int height = 2u;
97  unsigned int width = 3u;
98 
99  unsigned int shape[] = { depth, batchSize, channels, height, width };
100 
101  std::vector<float> input1 =
102  {
103  2.6f, 4.0f, 4.4f, 2.7f, 4.6f, 2.8f,
104  2.3f, 1.9f, 3.4f, 2.9f, 2.2f, 4.5f,
105 
106  2.8f, 1.9f, 2.3f, 2.6f, 4.7f, 3.5f,
107  0.4f, 1.5f, 2.1f, 0.7f, 5.0f, 1.1f,
108 
109 
110  1.0f, 2.7f, 0.0f, 0.6f, 0.8f, 0.9f,
111  1.0f, 2.6f, 0.4f, 3.8f, 0.4f, 0.8f,
112 
113  0.5f, 4.3f, 3.1f, 4.4f, 0.7f, 1.4f,
114  0.4f, 4.4f, 0.7f, 0.6f, 4.7f, 1.2f,
115 
116  };
117 
118  std::vector<float> input2 =
119  {
120  4.4f, 3.0f, 1.0f, 0.0f, 3.9f, 3.1f,
121  1.7f, 2.9f, 1.3f, 0.4f, 0.4f, 4.3f,
122 
123  4.5f, 0.2f, 2.2f, 4.1f, 3.9f, 3.0f,
124  0.1f, 2.5f, 4.1f, 4.6f, 1.5f, 0.0f,
125 
126 
127  0.5f, 4.9f, 2.5f, 1.5f, 3.4f, 4.5f,
128  2.0f, 3.0f, 4.9f, 1.6f, 2.4f, 3.4f,
129 
130  3.6f, 1.8f, 1.3f, 2.6f, 2.1f, 4.8f,
131  2.0f, 4.3f, 4.0f, 0.2f, 0.6f, 4.4f,
132  };
133 
134  std::vector<float> output =
135  {
136  7.0f, 7.0f, 5.4f, 2.7f, 8.5f, 5.9f,
137  4.0f, 4.8f, 4.7f, 3.3f, 2.6f, 8.8f,
138 
139  7.3f, 2.1f, 4.5f, 6.7f, 8.6f, 6.5f,
140  0.5f, 4.0f, 6.2f, 5.3f, 6.5f, 1.1f,
141 
142 
143  1.5f, 7.6f, 2.5f, 2.1f, 4.2f, 5.4f,
144  3.0f, 5.6f, 5.3f, 5.4f, 2.8f, 4.2f,
145 
146  4.1f, 6.1f, 4.4f, 7.0f, 2.8f, 6.2f,
147  2.4f, 8.7f, 4.7f, 0.8f, 5.3f, 5.6f,
148  };
149 
150  return ElementwiseTestHelper<5, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
151  workloadFactory,
152  memoryManager,
153  shape,
154  input1,
155  shape,
156  input2,
157  shape,
158  output);
159 }

◆ AdditionAfterMaxPoolTest()

LayerTestResult<float, 4> AdditionAfterMaxPoolTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 516 of file AdditionTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreatePooling2d(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, Pooling2dDescriptor::m_PoolHeight, and armnn::Max.

Referenced by BOOST_AUTO_TEST_CASE().

519 {
520  IgnoreUnused(memoryManager);
521 
522  // Create Initial Tensor
523  // 1, 2, 3
524  // 4, 5, 6
525  // 7, 8, 9
526 
527  armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
528  armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
529 
530  boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
531  {1, 2, 3,
532  4, 5, 6,
533  7, 8, 9
534  });
535  ARMNN_NO_DEPRECATE_WARN_BEGIN
536  std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
537  workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
538  std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
539  workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
540  ARMNN_NO_DEPRECATE_WARN_END
541 
542  // Apply MaxPool poolSize = 1x1, stride=2x2
543  // Result =
544  // 1, 3
545  // 7, 9
546  armnn::Pooling2dDescriptor descriptor;
547  descriptor.m_PoolHeight = 1;
548  descriptor.m_PoolWidth = 1;
549  descriptor.m_StrideX = 2;
550  descriptor.m_StrideY = 2;
551  descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
552 
553  armnn::Pooling2dQueueDescriptor queueDescriptor;
554  queueDescriptor.m_Parameters = descriptor;
555  armnn::WorkloadInfo workloadInfo;
556  AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
557  AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
558 
559  // Create the MaxPool
560  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
561 
562  //LayerTestResult<float, 4> result(poolingOutputTensorInfo);
563  auto shape( GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
564  boost::multi_array<float, 4> resultMaxPool;
565  resultMaxPool.resize(shape);
566 
567 
568  // Create an addition with another tensor of the same size.
569  // This would be the result of applying a Conv2d with a kernel of ones(2)
570  // and stride 1x1 to the initial tensor.
571  // 12, 16
572  // 24, 28
573 
574  armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
575  armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
576 
577  boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
578  {12, 16,
579  24, 28,
580  });
581 
582  // Expected output tensor after MaxPool and Addition.
583  LayerTestResult<float,4> addRet(addOutputTensorInfo);
584  addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
585  {
586  13, 19,
587  31, 37
588  }));
589 
590  ARMNN_NO_DEPRECATE_WARN_BEGIN
591  std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
592  std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
593  ARMNN_NO_DEPRECATE_WARN_END
594 
595  armnn::AdditionQueueDescriptor data;
596  armnn::WorkloadInfo info;
597 
598  // Add the output of the MaxPool and the new tensor
599  AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
600  AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
601  AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
602 
603  std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
604 
605  poolingInputHandle->Allocate();
606  poolingOutputHandle->Allocate();
607  addInputHandle->Allocate();
608  addOutputHandle->Allocate();
609 
610  CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
611  CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
612 
613  CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
614  CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
615 
616  workload->PostAllocationConfigure();
617  workload->Execute();
618  addWorkload->PostAllocationConfigure();
619  addWorkload->Execute();
620 
621  CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
622 
623  return addRet;
624 }
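The two workloads are chained by hand: the pooling result is read back from poolingOutputHandle and copied straight in again before the addition runs. The arithmetic the test expects can be verified in plain C++, independent of Arm NN:

#include <cassert>

int main()
{
    const float in[3][3] = { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
    // A 1x1 max pool with stride 2x2 simply samples every other element.
    const float pooled[4]   = { in[0][0], in[0][2], in[2][0], in[2][2] }; // 1, 3, 7, 9
    const float addend[4]   = { 12, 16, 24, 28 };
    const float expected[4] = { 13, 19, 31, 37 };
    for (int i = 0; i < 4; ++i)
    {
        assert(pooled[i] + addend[i] == expected[i]);
    }
    return 0;
}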

◆ AdditionBroadcast1ElementInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcast1ElementInt16Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 374 of file AdditionTestImpl.cpp.

377 {
378  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QSymmS16>(
379  workloadFactory, memoryManager, 0.1333333f, 0);
380 }

◆ AdditionBroadcast1ElementInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcast1ElementInt32Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 382 of file AdditionTestImpl.cpp.

385 {
386  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Signed32>(
387  workloadFactory, memoryManager, 1.f, 0);
388 }

◆ AdditionBroadcast1ElementTest()

LayerTestResult<float, 4> AdditionBroadcast1ElementTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 358 of file AdditionTestImpl.cpp.

361 {
362  return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
363  workloadFactory, memoryManager, 0.0f, 0);
364 }

◆ AdditionBroadcast1ElementTestImpl()

LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 247 of file AdditionTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

252 {
253  IgnoreUnused(memoryManager);
254  armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
255  armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
256  armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
257 
258  if (armnn::IsQuantizedType<T>())
259  {
260  inputTensorInfo1.SetQuantizationScale(qScale);
261  inputTensorInfo1.SetQuantizationOffset(qOffset);
262  inputTensorInfo2.SetQuantizationScale(qScale);
263  inputTensorInfo2.SetQuantizationOffset(qOffset);
264  outputTensorInfo.SetQuantizationScale(qScale);
265  outputTensorInfo.SetQuantizationOffset(qOffset);
266  }
267 
268  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
269  {
270  0.0f, 1.0f, 2.0f,
271  3.0f, 4.0f, 5.0f,
272  6.0f, 7.0f, 8.0f,
273  9.0f, 10.0f, 11.0f,
274  12.0f, 13.0f, 14.0f,
275  15.0f, 16.0f, 17.0f,
276  },
277  qScale, qOffset));
278 
279  auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
280  {
281  0.5f,
282  },
283  qScale, qOffset));
284 
285  LayerTestResult<T,4> ret(outputTensorInfo);
286  ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
287  {
288  0.5f, 1.5f, 2.5f,
289  3.5f, 4.5f, 5.5f,
290  6.5f, 7.5f, 8.5f,
291  9.5f, 10.5f, 11.5f,
292  12.5f, 13.5f, 14.5f,
293  15.5f, 16.5f, 17.5f,
294  },
295  qScale, qOffset));
296 
297  ARMNN_NO_DEPRECATE_WARN_BEGIN
298  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
299  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
300  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
301  ARMNN_NO_DEPRECATE_WARN_END
302 
303  armnn::AdditionQueueDescriptor data;
304  armnn::WorkloadInfo info;
305  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
306  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
307  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
308 
309  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
310 
311  inputHandle1->Allocate();
312  inputHandle2->Allocate();
313  outputHandle->Allocate();
314 
315  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
316  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
317 
318  workload->PostAllocationConfigure();
319  workload->Execute();
320 
321  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
322 
323  return ret;
324 }
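For quantized types, armnnUtils::QuantizedVector converts each float using the affine scheme q = round(f / qScale) + qOffset, which is why the Uint8 variant below passes qScale 0.1333333 and qOffset 128. A minimal standalone sketch of that mapping; the pass-through behaviour for qScale 0 and the exact rounding mode are assumptions about the helper:

#include <cmath>
#include <cstdint>
#include <vector>

// Standalone sketch of the float -> quantized mapping (not the real helper).
template <typename T>
std::vector<T> QuantizeSketch(const std::vector<float>& values, float scale, int32_t offset)
{
    std::vector<T> out;
    out.reserve(values.size());
    for (float v : values)
    {
        // scale == 0 is treated as "not quantized": pass values through.
        float q = (scale == 0.0f) ? v : std::round(v / scale) + static_cast<float>(offset);
        out.push_back(static_cast<T>(q));
    }
    return out;
}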

◆ AdditionBroadcast1ElementUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 366 of file AdditionTestImpl.cpp.

369 {
370  return AdditionBroadcast1ElementTestImpl<armnn::DataType::QAsymmU8>(
371  workloadFactory, memoryManager, 0.1333333f, 128);
372 }

◆ AdditionBroadcastInt16Test()

LayerTestResult<int16_t, 4> AdditionBroadcastInt16Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 342 of file AdditionTestImpl.cpp.

345 {
346  return AdditionBroadcastTestImpl<armnn::DataType::QSymmS16>(
347  workloadFactory, memoryManager, 2.f, 0);
348 }

◆ AdditionBroadcastInt32Test()

LayerTestResult<int32_t, 4> AdditionBroadcastInt32Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 350 of file AdditionTestImpl.cpp.

353 {
354  return AdditionBroadcastTestImpl<armnn::DataType::Signed32>(
355  workloadFactory, memoryManager, 1.f, 0);
356 }

◆ AdditionBroadcastTest()

LayerTestResult<float, 4> AdditionBroadcastTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 326 of file AdditionTestImpl.cpp.

329 {
330  return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
331  workloadFactory, memoryManager, 0.0f, 0);
332 }

◆ AdditionBroadcastTestImpl()

LayerTestResult<T, 4> AdditionBroadcastTestImpl ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
float  qScale,
int32_t  qOffset 
)

Definition at line 162 of file AdditionTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreateTensorHandle(), armnn::IgnoreUnused(), LayerTestResult< T, n >::output, LayerTestResult< T, n >::outputExpected, TensorInfo::SetQuantizationOffset(), and TensorInfo::SetQuantizationScale().

167 {
168  IgnoreUnused(memoryManager);
169  armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
170  armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
171  armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
172 
173  if (armnn::IsQuantizedType<T>())
174  {
175  inputTensorInfo1.SetQuantizationScale(qScale);
176  inputTensorInfo1.SetQuantizationOffset(qOffset);
177  inputTensorInfo2.SetQuantizationScale(qScale);
178  inputTensorInfo2.SetQuantizationOffset(qOffset);
179  outputTensorInfo.SetQuantizationScale(qScale);
180  outputTensorInfo.SetQuantizationOffset(qOffset);
181  }
182 
183  auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
184  {
185  0.0f,
186  1.0f,
187 
188  2.0f,
189  3.0f,
190 
191  4.0f,
192  5.0f,
193  },
194  qScale, qOffset));
195 
196  auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
197  {
198  0.5f, 1.5f, 2.5f,
199  3.5f, 4.5f, 5.5f,
200  },
201  qScale, qOffset));
202 
203  LayerTestResult<T,4> ret(outputTensorInfo);
204  ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
205  {
206  0.5f, 1.5f, 2.5f,
207  4.5f, 5.5f, 6.5f,
208 
209  2.5f, 3.5f, 4.5f,
210  6.5f, 7.5f, 8.5f,
211 
212  4.5f, 5.5f, 6.5f,
213  8.5f, 9.5f, 10.5f,
214  },
215  qScale, qOffset));
216 
217  ARMNN_NO_DEPRECATE_WARN_BEGIN
218  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
219  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
220  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
221  ARMNN_NO_DEPRECATE_WARN_END
222 
223  armnn::AdditionQueueDescriptor data;
224  armnn::WorkloadInfo info;
225  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
226  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
227  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
228 
229  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
230 
231  inputHandle1->Allocate();
232  inputHandle2->Allocate();
233  outputHandle->Allocate();
234 
235  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
236  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
237 
238  workload->PostAllocationConfigure();
239  workload->Execute();
240 
241  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
242 
243  return ret;
244 }
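The shapes exercise two broadcasts at once: input1 ({1, 3, 2, 1}) repeats along W, input2 ({1, 1, 2, 3}) repeats along C, so every output element is input1[0][c][h][0] + input2[0][0][h][w]. A plain C++ illustration of that rule, which reproduces the expected values above:

#include <cstdio>

int main()
{
    const float in1[3][2] = { {0, 1}, {2, 3}, {4, 5} };                 // shape {1, 3, 2, 1}
    const float in2[2][3] = { {0.5f, 1.5f, 2.5f}, {3.5f, 4.5f, 5.5f} }; // shape {1, 1, 2, 3}
    for (int c = 0; c < 3; ++c)
    {
        for (int h = 0; h < 2; ++h)
        {
            for (int w = 0; w < 3; ++w)
            {
                std::printf("%g ", in1[c][h] + in2[h][w]); // matches outputExpected
            }
        }
    }
    return 0;
}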

◆ AdditionBroadcastUint8Test()

LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 334 of file AdditionTestImpl.cpp.

337 {
338  return AdditionBroadcastTestImpl<armnn::DataType::QAsymmU8>(
339  workloadFactory, memoryManager, 2.f, 0);
340 }

◆ AdditionInt16Test()

LayerTestResult<int16_t, 4> AdditionInt16Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 432 of file AdditionTestImpl.cpp.

435 {
436  const unsigned int shape0[] = { 1, 2, 2, 3 };
437  const unsigned int shape1[] = { 1, 2, 2, 3 };
438 
439  std::vector<int16_t> input0 =
440  {
441  63, 35, 77, 70, 56, 112, // 441, 245, 539, 490, 392, 784
442  203, 28, 252, 168, 245, 91 // 1421, 196, 1764, 1176, 1715, 637
443  };
444 
445  std::vector<int16_t> input1 =
446  {
447  21, 7, 175, 231, 175, 210, // 147, 49, 1225, 1617, 1225, 1470
448  126, 161, 63, 21, 105, 126 // 882, 1127, 441, 147, 735, 882
449  };
450 
451  std::vector<int16_t> output =
452  {
453  84, 42, 252, 301, 231, 322, // 588, 294, 1764, 2107, 1617, 2254
454  329, 189, 315, 189, 350, 217, // 2303, 1323, 2205, 1323, 2450, 1519 (within QSymmS16 range, no clamping)
455  };
456 
457  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QSymmS16>(
458  workloadFactory,
459  memoryManager,
460  shape0,
461  input0,
462  7.0f,
463  0,
464  shape1,
465  input1,
466  7.0f,
467  0,
468  shape0,
469  output,
470  7.0f,
471  0);
472 }

◆ AdditionInt32Test()

LayerTestResult<int32_t, 4> AdditionInt32Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 474 of file AdditionTestImpl.cpp.

477 {
478  const unsigned int shape0[] = { 1, 2, 2, 3 };
479  const unsigned int shape1[] = { 1, 2, 2, 3 };
480 
481  std::vector<int32_t> input0 =
482  {
483  63, 35, 77, 70, 56, 112, // scale is 1.0 and offset 0, so values are used as-is
484  203, 28, 252, 168, 245, 91
485  };
486 
487  std::vector<int32_t> input1 =
488  {
489  21, 7, 175, 231, 175, 210,
490  126, 161, 63, 21, 105, 126
491  };
492 
493  std::vector<int32_t> output =
494  {
495  84, 42, 252, 301, 231, 322, // plain element-wise sums; no saturation occurs
496  329, 189, 315, 189, 350, 217,
497  };
498 
499  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Signed32>(
500  workloadFactory,
501  memoryManager,
502  shape0,
503  input0,
504  1.0f,
505  0,
506  shape1,
507  input1,
508  1.0f,
509  0,
510  shape0,
511  output,
512  1.0f,
513  0);
514 }

◆ AdditionTest()

LayerTestResult<float,4> AdditionTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 21 of file AdditionTestImpl.cpp.

24 {
25  unsigned int batchSize = 2u;
26  unsigned int channels = 2u;
27  unsigned int height = 2u;
28  unsigned int width = 3u;
29 
30  unsigned int shape[] = { batchSize, channels, height, width };
31 
32  std::vector<float> input1 =
33  {
34  0.0f, 2.0f, 1.0f,
35  0.2f, 1.0f, 2.0f,
36 
37  1.0f, 2.0f, 1.0f,
38  0.2f, 1.0f, 2.0f,
39 
40  0.0f, 2.0f, 1.0f,
41  4.2f, 1.0f, 2.0f,
42 
43  0.0f, 0.0f, 1.0f,
44  0.2f, 1.0f, 2.0f,
45  };
46 
47  std::vector<float> input2 =
48  {
49  1.0f, 2.0f, 1.0f,
50  0.0f, 1.0f, 2.0f,
51 
52  1.0f, 2.0f, -2.0f,
53  0.2f, 1.0f, 2.0f,
54 
55  0.0f, 2.0f, 1.0f,
56  4.2f, 0.0f, -3.0f,
57 
58  0.0f, 0.0f, 1.0f,
59  0.7f, 1.0f, 5.0f,
60  };
61 
62 
63  std::vector<float> output
64  {
65  1.0f, 4.0f, 2.0f,
66  0.2f, 2.0f, 4.0f,
67 
68  2.0f, 4.0f, -1.0f,
69  0.4f, 2.0f, 4.0f,
70 
71  0.0f, 4.0f, 2.0f,
72  8.4f, 1.0f, -1.0f,
73 
74  0.0f, 0.0f, 2.0f,
75  0.9f, 2.0f, 7.0f,
76  };
77 
78  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::Float32>(
79  workloadFactory,
80  memoryManager,
81  shape,
82  input1,
83  shape,
84  input2,
85  shape,
86  output);
87 }

◆ AdditionUint8Test()

LayerTestResult<uint8_t, 4> AdditionUint8Test ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager 
)

Definition at line 390 of file AdditionTestImpl.cpp.

393 {
394  const unsigned int shape0[] = { 1, 2, 2, 3 };
395  const unsigned int shape1[] = { 1, 2, 2, 3 };
396 
397  std::vector<uint8_t> input0(
398  {
399  63, 35, 77, 70, 56, 112, // 420, 224, 518, 469, 371, 763
400  203, 28, 252, 168, 245, 91 // 1400, 175, 1743, 1155, 1694, 616
401  });
402 
403  std::vector<uint8_t> input1(
404  {
405  21, 7, 175, 231, 175, 210, // 126, 28, 1204, 1596, 1204, 1449
406  126, 161, 63, 21, 105, 126 // 861, 1106, 420, 126, 714, 861
407  });
408 
409  std::vector<uint8_t> output(
410  {
411  81, 39, 249, 255, 228, 255, // 546, 252, 1722, 2065(clamped), 1575, 2212(clamped)
412  255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
413  });
414 
415  return ElementwiseTestHelper<4, armnn::AdditionQueueDescriptor, armnn::DataType::QAsymmU8>(
416  workloadFactory,
417  memoryManager,
418  shape0,
419  input0,
420  7.0f,
421  3,
422  shape1,
423  input1,
424  7.0f,
425  3,
426  shape0,
427  output,
428  7.0f,
429  3);
430 }

◆ CompareAdditionTest()

LayerTestResult<float,4> CompareAdditionTest ( armnn::IWorkloadFactory &workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager,
armnn::IWorkloadFactory &refWorkloadFactory 
)

Definition at line 626 of file AdditionTestImpl.cpp.

References ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateAddition(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, and armnn::IgnoreUnused().

630 {
631  IgnoreUnused(memoryManager);
632  unsigned int batchSize = 4;
633  unsigned int channels = 1;
634  unsigned int height = 2;
635  unsigned int width = 3;
636 
637  armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
638  armnn::TensorInfo outputTensorInfo;
639 
640  unsigned int shape[] = {batchSize, channels, height, width};
641 
642  inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
643  inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
644  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
645 
646  auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
647  auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
648 
649  LayerTestResult<float,4> ret(outputTensorInfo);
650 
651  ARMNN_NO_DEPRECATE_WARN_BEGIN
652  std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
653  std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
654  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
655 
656  std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
657  std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
658  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
659  ARMNN_NO_DEPRECATE_WARN_END
660 
661  armnn::AdditionQueueDescriptor data;
662  armnn::WorkloadInfo info;
663  AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
664  AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
665  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
666 
667  armnn::AdditionQueueDescriptor refData = data;
668  armnn::WorkloadInfo refInfo = info;
669  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
670  SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
671  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
672 
673  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
674  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
675 
676  inputHandle1->Allocate();
677  inputHandle2->Allocate();
678  outputHandle->Allocate();
679  inputHandle1Ref->Allocate();
680  inputHandle2Ref->Allocate();
681  outputHandleRef->Allocate();
682 
683  CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
684  CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
685  CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
686  CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
687 
688  workload->PostAllocationConfigure();
689  workload->Execute();
690  workloadRef->PostAllocationConfigure();
691  workloadRef->Execute();
692 
693  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
694  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
695 
696  return ret;
697 }
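The harness runs identical workloads on two factories: output comes from workloadFactory, outputExpected from refWorkloadFactory, so a test can then compare the two tensors element-wise. A hedged wrapper sketch; how each concrete factory is constructed is backend-specific and omitted here:

// Sketch: compare any backend factory against a reference factory.
LayerTestResult<float, 4> CompareAgainstReference(armnn::IWorkloadFactory& backendFactory,
                                                  armnn::IWorkloadFactory& refFactory)
{
    // nullptr: this harness ignores the memory manager (see IgnoreUnused above).
    return CompareAdditionTest(backendFactory, nullptr, refFactory);
}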

◆ CreateWorkload< armnn::AdditionQueueDescriptor >()

std::unique_ptr<armnn::IWorkload> CreateWorkload< armnn::AdditionQueueDescriptor > ( const armnn::IWorkloadFactory &workloadFactory,
const armnn::WorkloadInfo &info,
const armnn::AdditionQueueDescriptor &descriptor 
)

Definition at line 13 of file AdditionTestImpl.cpp.

17 {
18  return workloadFactory.CreateAddition(descriptor, info);
19 }
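This explicit specialization lets shared helpers such as ElementwiseTestHelper create the right workload from the descriptor type alone. A sketch of how a generic caller dispatches through it; the primary template lives elsewhere, so its exact form here is an assumption:

// Sketch: generic dispatch on the descriptor type. When DescriptorType is
// armnn::AdditionQueueDescriptor, this resolves to the specialization above,
// i.e. workloadFactory.CreateAddition(descriptor, info).
template <typename DescriptorType>
std::unique_ptr<armnn::IWorkload> MakeWorkloadSketch(const armnn::IWorkloadFactory& factory,
                                                     const armnn::WorkloadInfo& info,
                                                     const DescriptorType& descriptor)
{
    return CreateWorkload<DescriptorType>(factory, info, descriptor);
}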