ArmNN
 20.05
BatchNormalizationTestImpl.hpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > BatchNormFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > BatchNormFloat32NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > BatchNormInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > BatchNormInt16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > CompareBatchNormTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory)
 

Function Documentation

◆ BatchNormFloat16NhwcTest()

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 349 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

352 {
353  // BatchSize: 1
354  // Height: 3
355  // Width: 2
356  // Channels: 2
357 
358  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
359  std::vector<float> inputValues
360  {
361  // Batch 0, Height 0, Width (2) x Channel (2)
362  1.f, 1.f,
363  4.f, 1.f,
364 
365  // Batch 0, Height 1, Width (2) x Channel (2)
366  4.f, 4.f,
367  2.f, 1.f,
368 
369  // Batch 0, Height 2, Width (2) x Channel (2)
370  1.f, -2.f,
371  6.f, 4.f
372  };
373  std::vector<float> expectedOutputValues
374  {
375  // Batch 0, Height 0, Width (2) x Channel (2)
376  1.f, 3.f,
377  4.f, 3.f,
378 
379  // Batch 0, Height 1, Width (2) x Channel (2)
380  4.f, 4.f,
381  2.f, 3.f,
382 
383  // Batch 0, Height 2, Width (2) x Channel (2)
384  1.f, 2.f,
385  6.f, 4.f
386  };
387 
388  return BatchNormTestImpl<armnn::DataType::Float16>(
389  workloadFactory,
390  memoryManager,
391  inputOutputShape,
392  inputValues,
393  expectedOutputValues,
394  0.f,
395  0,
396  armnn::DataLayout::NHWC);
397 }

◆ BatchNormFloat16Test()

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 303 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

306 {
307  // BatchSize: 1
308  // Channels: 2
309  // Height: 3
310  // Width: 2
311 
312  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
313  std::vector<float> inputValues
314  {
315  // Batch 0, Channel 0, Height (3) x Width (2)
316  1.f, 4.f,
317  4.f, 2.f,
318  1.f, 6.f,
319 
320  // Batch 0, Channel 1, Height (3) x Width (2)
321  1.f, 1.f,
322  4.f, 1.f,
323  -2.f, 4.f
324  };
325  std::vector<float> expectedOutputValues
326  {
327  // Batch 0, Channel 0, Height (3) x Width (2)
328  1.f, 4.f,
329  4.f, 2.f,
330  1.f, 6.f,
331 
332  // Batch 0, Channel 1, Height (3) x Width (2)
333  3.f, 3.f,
334  4.f, 3.f,
335  2.f, 4.f
336  };
337 
338  return BatchNormTestImpl<armnn::DataType::Float16>(
339  workloadFactory,
340  memoryManager,
341  inputOutputShape,
342  inputValues,
343  expectedOutputValues,
344  0.f,
345  0,
346  armnn::DataLayout::NCHW);
347 }

◆ BatchNormFloat32NhwcTest()

LayerTestResult<float, 4> BatchNormFloat32NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 253 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

256 {
257  // BatchSize: 1
258  // Height: 3
259  // Width: 2
260  // Channels: 2
261 
262  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
263  std::vector<float> inputValues
264  {
265  // Batch 0, Height 0, Width (2) x Channel (2)
266  1.f, 1.f,
267  4.f, 1.f,
268 
269  // Batch 0, Height 1, Width (2) x Channel (2)
270  4.f, 4.f,
271  2.f, 1.f,
272 
273  // Batch 0, Height 2, Width (2) x Channel (2)
274  1.f, -2.f,
275  6.f, 4.f
276  };
277  std::vector<float> expectedOutputValues
278  {
279  // Batch 0, Height 0, Width (2) x Channel (2)
280  1.f, 3.f,
281  4.f, 3.f,
282 
283  // Batch 0, Height 1, Width (2) x Channel (2)
284  4.f, 4.f,
285  2.f, 3.f,
286 
287  // Batch 0, Height 2, Width (2) x Channel (2)
288  1.f, 2.f,
289  6.f, 4.f
290  };
291 
292  return BatchNormTestImpl<armnn::DataType::Float32>(
293  workloadFactory,
294  memoryManager,
295  inputOutputShape,
296  inputValues,
297  expectedOutputValues,
298  0.f,
299  0,
300  armnn::DataLayout::NHWC);
301 }

◆ BatchNormFloat32Test()

LayerTestResult<float, 4> BatchNormFloat32Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 207 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

210 {
211  // BatchSize: 1
212  // Channels: 2
213  // Height: 3
214  // Width: 2
215 
216  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
217  std::vector<float> inputValues
218  {
219  // Batch 0, Channel 0, Height (3) x Width (2)
220  1.f, 4.f,
221  4.f, 2.f,
222  1.f, 6.f,
223 
224  // Batch 0, Channel 1, Height (3) x Width (2)
225  1.f, 1.f,
226  4.f, 1.f,
227  -2.f, 4.f
228  };
229  std::vector<float> expectedOutputValues
230  {
231  // Batch 0, Channel 0, Height (3) x Width (2)
232  1.f, 4.f,
233  4.f, 2.f,
234  1.f, 6.f,
235 
236  // Batch 0, Channel 1, Height (3) x Width (2)
237  3.f, 3.f,
238  4.f, 3.f,
239  2.f, 4.f
240  };
241 
242  return BatchNormTestImpl<armnn::DataType::Float32>(
243  workloadFactory,
244  memoryManager,
245  inputOutputShape,
246  inputValues,
247  expectedOutputValues,
248  0.f,
249  0,
250  armnn::DataLayout::NCHW);
251 }

◆ BatchNormInt16NhwcTest()

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 537 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

540 {
541  // BatchSize: 1
542  // Height: 3
543  // Width: 2
544  // Channels: 2
545 
546  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
547  std::vector<float> inputValues
548  {
549  // Batch 0, Height 0, Width (2) x Channel (2)
550  1.f, 1.f,
551  4.f, 1.f,
552 
553  // Batch 0, Height 1, Width (2) x Channel (2)
554  4.f, 4.f,
555  2.f, 1.f,
556 
557  // Batch 0, Height 2, Width (2) x Channel (2)
558  1.f, -2.f,
559  6.f, 4.f
560  };
561  std::vector<float> expectedOutputValues
562  {
563  // Batch 0, Height 0, Width (2) x Channel (2)
564  1.f, 3.f,
565  4.f, 3.f,
566 
567  // Batch 0, Height 1, Width (2) x Channel (2)
568  4.f, 4.f,
569  2.f, 3.f,
570 
571  // Batch 0, Height 2, Width (2) x Channel (2)
572  1.f, 2.f,
573  6.f, 4.f
574  };
575 
576  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
577  workloadFactory,
578  memoryManager,
579  inputOutputShape,
580  inputValues,
581  expectedOutputValues,
582  1.f / 20.f,
583  50,
584  armnn::DataLayout::NHWC);
585 }

◆ BatchNormInt16Test()

LayerTestResult<int16_t, 4> BatchNormInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 491 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

494 {
495  // BatchSize: 1
496  // Channels: 2
497  // Height: 3
498  // Width: 2
499 
500  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
501  std::vector<float> inputValues
502  {
503  // Batch 0, Channel 0, Height (3) x Width (2)
504  1.f, 4.f,
505  4.f, 2.f,
506  1.f, 6.f,
507 
508  // Batch 0, Channel 1, Height (3) x Width (2)
509  1.f, 1.f,
510  4.f, 1.f,
511  -2.f, 4.f
512  };
513  std::vector<float> expectedOutputValues
514  {
515  // Batch 0, Channel 0, Height (3) x Width (2)
516  1.f, 4.f,
517  4.f, 2.f,
518  1.f, 6.f,
519 
520  // Batch 0, Channel 1, Height (3) x Width (2)
521  3.f, 3.f,
522  4.f, 3.f,
523  2.f, 4.f
524  };
525 
526  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
527  workloadFactory,
528  memoryManager,
529  inputOutputShape,
530  inputValues,
531  expectedOutputValues,
532  1.f / 20.f,
533  50,
534  armnn::DataLayout::NCHW);
535 }

◆ BatchNormUint8NhwcTest()

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 445 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

448 {
449  // BatchSize: 1
450  // Height: 3
451  // Width: 2
452  // Channels: 2
453 
454  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
455  std::vector<float> inputValues
456  {
457  // Batch 0, Height 0, Width (2) x Channel (2)
458  1.f, 1.f,
459  4.f, 1.f,
460 
461  // Batch 0, Height 1, Width (2) x Channel (2)
462  4.f, 4.f,
463  2.f, 1.f,
464 
465  // Batch 0, Height 2, Width (2) x Channel (2)
466  1.f, -2.f,
467  6.f, 4.f
468  };
469  std::vector<float> expectedOutputValues
470  {
471  // Batch 0, Height 0, Width (2) x Channel (2)
472  1.f, 3.f,
473  4.f, 3.f,
474 
475  // Batch 0, Height 1, Width (2) x Channel (2)
476  4.f, 4.f,
477  2.f, 3.f,
478 
479  // Batch 0, Height 2, Width (2) x Channel (2)
480  1.f, 2.f,
481  6.f, 4.f
482  };
483 
484  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
485  workloadFactory,
486  memoryManager,
487  inputOutputShape, inputValues, expectedOutputValues,
488  1.f/20.f, 50, armnn::DataLayout::NHWC);
489 }

◆ BatchNormUint8Test()

LayerTestResult<uint8_t, 4> BatchNormUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 399 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

402 {
403  // BatchSize: 1
404  // Channels: 2
405  // Height: 3
406  // Width: 2
407 
408  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
409  std::vector<float> inputValues
410  {
411  // Batch 0, Channel 0, Height (3) x Width (2)
412  1.f, 4.f,
413  4.f, 2.f,
414  1.f, 6.f,
415 
416  // Batch 0, Channel 1, Height (3) x Width (2)
417  1.f, 1.f,
418  4.f, 1.f,
419  -2.f, 4.f
420  };
421  std::vector<float> expectedOutputValues
422  {
423  // Batch 0, Channel 0, Height (3) x Width (2)
424  1.f, 4.f,
425  4.f, 2.f,
426  1.f, 6.f,
427 
428  // Batch 0, Channel 1, Height (3) x Width (2)
429  3.f, 3.f,
430  4.f, 3.f,
431  2.f, 4.f
432  };
433 
434  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
435  workloadFactory,
436  memoryManager,
437  inputOutputShape,
438  inputValues,
439  expectedOutputValues,
440  1.f / 20.f,
441  50,
442  armnn::DataLayout::NCHW);
443 }

◆ CompareBatchNormTest()

LayerTestResult<float, 4> CompareBatchNormTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory 
)

Definition at line 587 of file BatchNormalizationTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateBatchNormalization(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), BatchNormalizationQueueDescriptor::m_Beta, BatchNormalizationDescriptor::m_Eps, BatchNormalizationQueueDescriptor::m_Gamma, BatchNormalizationQueueDescriptor::m_Mean, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and BatchNormalizationQueueDescriptor::m_Variance.

591 {
592  IgnoreUnused(memoryManager);
593  const unsigned int width = 2;
594  const unsigned int height = 3;
595  const unsigned int channels = 5;
596  const unsigned int batchSize = 3;
597 
598  armnn::TensorInfo inputTensorInfo;
599  armnn::TensorInfo outputTensorInfo;
600  armnn::TensorInfo tensorInfo;
601 
602  constexpr unsigned int shape[] = {batchSize, channels, height, width};
603  constexpr unsigned int tensorShape[] = {channels};
604 
605  inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
606  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
607  tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
608 
609  auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
610 
611  auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
612  auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
613  auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
614  auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
615 
616  LayerTestResult<float,4> ret(outputTensorInfo);
617 
618  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
619  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
620 
621  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
622  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
623 
623 
624  armnn::BatchNormalizationQueueDescriptor data;
625  armnn::WorkloadInfo info;
626  armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
627  armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
628  armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
629  armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
630 
631  AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
632  AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
633  AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
634  AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
635 
636  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
637  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
638  data.m_Mean = &meanTensor;
639  data.m_Variance = &varianceTensor;
640  data.m_Beta = &betaTensor;
641  data.m_Gamma = &gammaTensor;
642  data.m_Parameters.m_Eps = 0.01f;
643 
644  armnn::BatchNormalizationQueueDescriptor refData = data;
645  armnn::WorkloadInfo refInfo = info;
646  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
647  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
648 
649  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
650  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
651 
652  inputHandle->Allocate();
653  outputHandle->Allocate();
654  inputHandleRef->Allocate();
655  outputHandleRef->Allocate();
656 
657  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
658  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
659 
660  workload->PostAllocationConfigure();
661  workload->Execute();
662  workloadRef->PostAllocationConfigure();
663  workloadRef->Execute();
664 
665  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
666  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
667 
668  return ret;
669 }
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
const ConstCpuTensorHandle * m_Gamma
const ConstCpuTensorHandle * m_Beta
const ConstCpuTensorHandle * m_Mean
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
const ConstCpuTensorHandle * m_Variance
void IgnoreUnused(Ts &&...)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)