ArmNN 21.11
BatchNormalizationTestImpl.cpp File Reference


Functions

LayerTestResult< float, 4 > BatchNormFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > BatchNormFloat32NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > BatchNormInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > BatchNormInt16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareBatchNormTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 

Function Documentation

◆ BatchNormFloat16NhwcTest()

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                         const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 362 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}
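The expected values above follow the standard batch-normalization formula y = gamma * (x - mean) / sqrt(variance + eps) + beta, applied per channel. The constants live inside BatchNormTestImpl and are not shown on this page, so the per-channel parameters in the sketch below are assumptions chosen only to be consistent with the listed inputs and outputs:

    // Minimal standalone sketch. The mean/variance/beta/gamma/eps values are assumed for
    // illustration (they reproduce the expected outputs listed above) and are not taken
    // from BatchNormTestImpl itself.
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float mean[2]     = { 3.f, -2.f };
        const float variance[2] = { 4.f,  9.f };
        const float beta[2]     = { 3.f,  2.f };
        const float gamma[2]    = { 2.f,  1.f };
        const float eps = 0.f;

        // Batch 0 of the NHWC input: Height (3) x Width (2) x Channel (2).
        const float input[3][2][2] =
        {
            { { 1.f,  1.f }, { 4.f, 1.f } },
            { { 4.f,  4.f }, { 2.f, 1.f } },
            { { 1.f, -2.f }, { 6.f, 4.f } }
        };

        for (int h = 0; h < 3; ++h)
        {
            for (int w = 0; w < 2; ++w)
            {
                for (int c = 0; c < 2; ++c)
                {
                    // y = gamma * (x - mean) / sqrt(variance + eps) + beta
                    const float x = input[h][w][c];
                    const float y = gamma[c] * (x - mean[c]) / std::sqrt(variance[c] + eps) + beta[c];
                    std::printf("h=%d w=%d c=%d: %g -> %g\n", h, w, c, x, y);
                }
            }
        }
        return 0;
    }

With these parameters channel 0 is an identity mapping (gamma / sqrt(variance) = 1 and beta = mean), while channel 1 maps 1, 4 and -2 to 3, 4 and 2, matching expectedOutputValues.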

◆ BatchNormFloat16Test()

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(armnn::IWorkloadFactory& workloadFactory,
                                                     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                     const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 314 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}
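This NCHW variant and the NHWC variant above hold the same logical values; only the order of the flattened vector differs. A hypothetical pair of index helpers (not part of the test code) makes the two layouts explicit:

    #include <cstddef>

    // Hypothetical helpers, for illustration only: flatten a (batch, channel, height, width)
    // coordinate into the two layouts used by these tests.
    std::size_t IndexNchw(std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                          std::size_t C, std::size_t H, std::size_t W)
    {
        return ((n * C + c) * H + h) * W + w;   // shape { N, C, H, W }, e.g. { 1, 2, 3, 2 }
    }

    std::size_t IndexNhwc(std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                          std::size_t C, std::size_t H, std::size_t W)
    {
        return ((n * H + h) * W + w) * C + c;   // shape { N, H, W, C }, e.g. { 1, 3, 2, 2 }
    }

For example, the element at batch 0, channel 1, height 2, width 0 (value -2.f) sits at offset 10 in the NCHW inputValues above and at offset 9 in the NHWC inputValues of BatchNormFloat16NhwcTest.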

◆ BatchNormFloat32NhwcTest()

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                   const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 262 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

◆ BatchNormFloat32Test()

LayerTestResult<float, 4> BatchNormFloat32Test(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 214 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

◆ BatchNormInt16NhwcTest()

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                   const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 558 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

◆ BatchNormInt16Test()

LayerTestResult<int16_t, 4> BatchNormInt16Test(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 510 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

◆ BatchNormUint8NhwcTest()

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                   const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 462 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NHWC);
}
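BatchNormUint8NhwcTest (and the other quantized variants) pass a scale of 1/20 and an offset of 50 through to BatchNormTestImpl, which presumably quantizes the float values shown above before running the QAsymmU8 workload. Assuming the usual affine scheme q = round(x / scale) + offset (the exact helper used inside BatchNormTestImpl is not shown on this page), the arithmetic looks like this:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // Illustrative affine quantization, q = round(x / scale) + offset, clamped to the
    // uint8 range. This mirrors the usual QAsymmU8 scheme and is not code from the test file.
    uint8_t QuantizeU8(float x, float scale, int32_t offset)
    {
        const int32_t q = static_cast<int32_t>(std::round(x / scale)) + offset;
        return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
    }

    int main()
    {
        const float scale = 1.f / 20.f;
        const int32_t offset = 50;

        // A few of the float values used by BatchNormUint8NhwcTest.
        for (float x : { -2.f, 1.f, 4.f, 6.f })
        {
            // -2 -> 10, 1 -> 70, 4 -> 130, 6 -> 170
            std::printf("%g -> %u\n", x, static_cast<unsigned int>(QuantizeU8(x, scale, offset)));
        }
        return 0;
    }

With this scale and offset the representable range before clamping is roughly [-2.5, 10.25], so all of the listed test values fit comfortably within uint8.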

◆ BatchNormUint8Test()

LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory)

Definition at line 414 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

◆ CompareBatchNormTest()

LayerTestResult<float, 4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
                                               const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                               armnn::IWorkloadFactory& refWorkloadFactory,
                                               const armnn::ITensorHandleFactory& tensorHandleFactory,
                                               const armnn::ITensorHandleFactory& refTensorHandleFactory)

Definition at line 610 of file BatchNormalizationTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateBatchNormalization(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, TensorInfo::GetNumElements(), TensorInfo::GetShape(), armnn::IgnoreUnused(), BatchNormalizationQueueDescriptor::m_Beta, BatchNormalizationDescriptor::m_Eps, BatchNormalizationQueueDescriptor::m_Gamma, BatchNormalizationQueueDescriptor::m_Mean, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and BatchNormalizationQueueDescriptor::m_Variance.

Referenced by TEST_SUITE().

{
    IgnoreUnused(memoryManager);
    const unsigned int width = 2;
    const unsigned int height = 3;
    const unsigned int channels = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float>(tensorInfo, 345);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle meanTensor(tensorInfo);
    armnn::ScopedTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedTensorHandle betaTensor(tensorInfo);
    armnn::ScopedTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}
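CompareBatchNormTest is intended to be driven from a backend's TEST_SUITE: the same randomly generated batch-normalization workload is run through the backend under test and through a reference workload factory, and both outputs are returned for comparison. A hypothetical wrapper showing how the two factories are paired up (obtaining the concrete factories and tensor-handle factories differs per backend and is not part of this file; the relevant test and backend headers are assumed to be included):

    // Hypothetical usage sketch. CompareBatchNormTest ignores the memory manager
    // (IgnoreUnused), so a null shared pointer is sufficient here.
    LayerTestResult<float, 4> RunBatchNormComparison(armnn::IWorkloadFactory& backendFactory,
                                                     const armnn::ITensorHandleFactory& backendHandleFactory,
                                                     armnn::IWorkloadFactory& refFactory,
                                                     const armnn::ITensorHandleFactory& refHandleFactory)
    {
        armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager = nullptr;

        return CompareBatchNormTest(backendFactory,
                                    memoryManager,
                                    refFactory,
                                    backendHandleFactory,
                                    refHandleFactory);
    }

The caller would then check that the two outputs stored in the returned LayerTestResult agree within a suitable tolerance.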