ArmNN
 22.02
BatchNormalizationTestImpl.cpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > BatchNormFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > BatchNormFloat32NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > BatchNormInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > BatchNormInt16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareBatchNormTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 

Function Documentation

◆ BatchNormFloat16NhwcTest()

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 364 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

368 {
369  // BatchSize: 1
370  // Height: 3
371  // Width: 2
372  // Channels: 2
373 
374  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
375  std::vector<float> inputValues
376  {
377  // Batch 0, Height 0, Width (2) x Channel (2)
378  1.f, 1.f,
379  4.f, 1.f,
380 
381  // Batch 0, Height 1, Width (2) x Channel (2)
382  4.f, 4.f,
383  2.f, 1.f,
384 
385  // Batch 0, Height 2, Width (2) x Channel (2)
386  1.f, -2.f,
387  6.f, 4.f
388  };
389  std::vector<float> expectedOutputValues
390  {
391  // Batch 0, Height 0, Width (2) x Channel (2)
392  1.f, 3.f,
393  4.f, 3.f,
394 
395  // Batch 0, Height 1, Width (2) x Channel (2)
396  4.f, 4.f,
397  2.f, 3.f,
398 
399  // Batch 0, Height 2, Width (2) x Channel (2)
400  1.f, 2.f,
401  6.f, 4.f
402  };
403 
404  return BatchNormTestImpl<armnn::DataType::Float16>(
405  workloadFactory,
406  memoryManager,
407  tensorHandleFactory,
408  inputOutputShape,
409  inputValues,
410  expectedOutputValues,
411  0.f,
412  0,
413  armnn::DataLayout::NHWC);
414 }

◆ BatchNormFloat16Test()

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 316 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

320 {
321  // BatchSize: 1
322  // Channels: 2
323  // Height: 3
324  // Width: 2
325 
326  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
327  std::vector<float> inputValues
328  {
329  // Batch 0, Channel 0, Height (3) x Width (2)
330  1.f, 4.f,
331  4.f, 2.f,
332  1.f, 6.f,
333 
334  // Batch 0, Channel 1, Height (3) x Width (2)
335  1.f, 1.f,
336  4.f, 1.f,
337  -2.f, 4.f
338  };
339  std::vector<float> expectedOutputValues
340  {
341  // Batch 0, Channel 0, Height (3) x Width (2)
342  1.f, 4.f,
343  4.f, 2.f,
344  1.f, 6.f,
345 
346  // Batch 0, Channel 1, Height (3) x Width (2)
347  3.f, 3.f,
348  4.f, 3.f,
349  2.f, 4.f
350  };
351 
352  return BatchNormTestImpl<armnn::DataType::Float16>(
353  workloadFactory,
354  memoryManager,
355  tensorHandleFactory,
356  inputOutputShape,
357  inputValues,
358  expectedOutputValues,
359  0.f,
360  0,
361  armnn::DataLayout::NCHW);
362 }

◆ BatchNormFloat32NhwcTest()

LayerTestResult<float, 4> BatchNormFloat32NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 264 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

268 {
269  // BatchSize: 1
270  // Height: 3
271  // Width: 2
272  // Channels: 2
273 
274  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
275  std::vector<float> inputValues
276  {
277  // Batch 0, Height 0, Width (2) x Channel (2)
278  1.f, 1.f,
279  4.f, 1.f,
280 
281  // Batch 0, Height 1, Width (2) x Channel (2)
282  4.f, 4.f,
283  2.f, 1.f,
284 
285  // Batch 0, Height 2, Width (2) x Channel (2)
286  1.f, -2.f,
287  6.f, 4.f
288  };
289  std::vector<float> expectedOutputValues
290  {
291  // Batch 0, Height 0, Width (2) x Channel (2)
292  1.f, 3.f,
293  4.f, 3.f,
294 
295  // Batch 0, Height 1, Width (2) x Channel (2)
296  4.f, 4.f,
297  2.f, 3.f,
298 
299  // Batch 0, Height 2, Width (2) x Channel (2)
300  1.f, 2.f,
301  6.f, 4.f
302  };
303 
304  return BatchNormTestImpl<armnn::DataType::Float32>(
305  workloadFactory,
306  memoryManager,
307  tensorHandleFactory,
308  inputOutputShape,
309  inputValues,
310  expectedOutputValues,
311  0.f,
312  0,
313  armnn::DataLayout::NHWC);
314 }

◆ BatchNormFloat32Test()

LayerTestResult<float, 4> BatchNormFloat32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 216 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

220 {
221  // BatchSize: 1
222  // Channels: 2
223  // Height: 3
224  // Width: 2
225 
226  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
227  std::vector<float> inputValues
228  {
229  // Batch 0, Channel 0, Height (3) x Width (2)
230  1.f, 4.f,
231  4.f, 2.f,
232  1.f, 6.f,
233 
234  // Batch 0, Channel 1, Height (3) x Width (2)
235  1.f, 1.f,
236  4.f, 1.f,
237  -2.f, 4.f
238  };
239  std::vector<float> expectedOutputValues
240  {
241  // Batch 0, Channel 0, Height (3) x Width (2)
242  1.f, 4.f,
243  4.f, 2.f,
244  1.f, 6.f,
245 
246  // Batch 0, Channel 1, Height (3) x Width (2)
247  3.f, 3.f,
248  4.f, 3.f,
249  2.f, 4.f
250  };
251 
252  return BatchNormTestImpl<armnn::DataType::Float32>(
253  workloadFactory,
254  memoryManager,
255  tensorHandleFactory,
256  inputOutputShape,
257  inputValues,
258  expectedOutputValues,
259  0.f,
260  0,
261  armnn::DataLayout::NCHW);
262 }

◆ BatchNormInt16NhwcTest()

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 560 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

564 {
565  // BatchSize: 1
566  // Height: 3
567  // Width: 2
568  // Channels: 2
569 
570  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
571  std::vector<float> inputValues
572  {
573  // Batch 0, Height 0, Width (2) x Channel (2)
574  1.f, 1.f,
575  4.f, 1.f,
576 
577  // Batch 0, Height 1, Width (2) x Channel (2)
578  4.f, 4.f,
579  2.f, 1.f,
580 
581  // Batch 0, Height 2, Width (2) x Channel (2)
582  1.f, -2.f,
583  6.f, 4.f
584  };
585  std::vector<float> expectedOutputValues
586  {
587  // Batch 0, Height 0, Width (2) x Channel (2)
588  1.f, 3.f,
589  4.f, 3.f,
590 
591  // Batch 0, Height 1, Width (2) x Channel (2)
592  4.f, 4.f,
593  2.f, 3.f,
594 
595  // Batch 0, Height 2, Width (2) x Channel (2)
596  1.f, 2.f,
597  6.f, 4.f
598  };
599 
600  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
601  workloadFactory,
602  memoryManager,
603  tensorHandleFactory,
604  inputOutputShape,
605  inputValues,
606  expectedOutputValues,
607  1.f / 20.f,
608  50,
609  armnn::DataLayout::NHWC);
610 }

◆ BatchNormInt16Test()

LayerTestResult<int16_t, 4> BatchNormInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 512 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

516 {
517  // BatchSize: 1
518  // Channels: 2
519  // Height: 3
520  // Width: 2
521 
522  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
523  std::vector<float> inputValues
524  {
525  // Batch 0, Channel 0, Height (3) x Width (2)
526  1.f, 4.f,
527  4.f, 2.f,
528  1.f, 6.f,
529 
530  // Batch 0, Channel 1, Height (3) x Width (2)
531  1.f, 1.f,
532  4.f, 1.f,
533  -2.f, 4.f
534  };
535  std::vector<float> expectedOutputValues
536  {
537  // Batch 0, Channel 0, Height (3) x Width (2)
538  1.f, 4.f,
539  4.f, 2.f,
540  1.f, 6.f,
541 
542  // Batch 0, Channel 1, Height (3) x Width (2)
543  3.f, 3.f,
544  4.f, 3.f,
545  2.f, 4.f
546  };
547 
548  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
549  workloadFactory,
550  memoryManager,
551  tensorHandleFactory,
552  inputOutputShape,
553  inputValues,
554  expectedOutputValues,
555  1.f / 20.f,
556  50,
557  armnn::DataLayout::NCHW);
558 }

◆ BatchNormUint8NhwcTest()

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 464 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

Referenced by TEST_SUITE().

468 {
469  // BatchSize: 1
470  // Height: 3
471  // Width: 2
472  // Channels: 2
473 
474  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
475  std::vector<float> inputValues
476  {
477  // Batch 0, Height 0, Width (2) x Channel (2)
478  1.f, 1.f,
479  4.f, 1.f,
480 
481  // Batch 0, Height 1, Width (2) x Channel (2)
482  4.f, 4.f,
483  2.f, 1.f,
484 
485  // Batch 0, Height 2, Width (2) x Channel (2)
486  1.f, -2.f,
487  6.f, 4.f
488  };
489  std::vector<float> expectedOutputValues
490  {
491  // Batch 0, Height 0, Width (2) x Channel (2)
492  1.f, 3.f,
493  4.f, 3.f,
494 
495  // Batch 0, Height 1, Width (2) x Channel (2)
496  4.f, 4.f,
497  2.f, 3.f,
498 
499  // Batch 0, Height 2, Width (2) x Channel (2)
500  1.f, 2.f,
501  6.f, 4.f
502  };
503 
504  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
505  workloadFactory,
506  memoryManager,
507  tensorHandleFactory,
508  inputOutputShape, inputValues, expectedOutputValues,
509  1.f/20.f, 50, armnn::DataLayout::NHWC);
510 }

◆ BatchNormUint8Test()

LayerTestResult<uint8_t, 4> BatchNormUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory
)

Definition at line 416 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

Referenced by TEST_SUITE().

420 {
421  // BatchSize: 1
422  // Channels: 2
423  // Height: 3
424  // Width: 2
425 
426  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
427  std::vector<float> inputValues
428  {
429  // Batch 0, Channel 0, Height (3) x Width (2)
430  1.f, 4.f,
431  4.f, 2.f,
432  1.f, 6.f,
433 
434  // Batch 0, Channel 1, Height (3) x Width (2)
435  1.f, 1.f,
436  4.f, 1.f,
437  -2.f, 4.f
438  };
439  std::vector<float> expectedOutputValues
440  {
441  // Batch 0, Channel 0, Height (3) x Width (2)
442  1.f, 4.f,
443  4.f, 2.f,
444  1.f, 6.f,
445 
446  // Batch 0, Channel 1, Height (3) x Width (2)
447  3.f, 3.f,
448  4.f, 3.f,
449  2.f, 4.f
450  };
451 
452  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
453  workloadFactory,
454  memoryManager,
455  tensorHandleFactory,
456  inputOutputShape,
457  inputValues,
458  expectedOutputValues,
459  1.f / 20.f,
460  50,
461  armnn::DataLayout::NCHW);
462 }

◆ CompareBatchNormTest()

LayerTestResult<float,4> CompareBatchNormTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::ITensorHandleFactory & tensorHandleFactory,
const armnn::ITensorHandleFactory & refTensorHandleFactory
)

Definition at line 612 of file BatchNormalizationTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), armnn::BatchNormalization, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), ITensorHandleFactory::CreateTensorHandle(), IWorkloadFactory::CreateWorkload(), armnn::Float32, TensorInfo::GetNumElements(), TensorInfo::GetShape(), armnn::IgnoreUnused(), BatchNormalizationQueueDescriptor::m_Beta, BatchNormalizationDescriptor::m_Eps, BatchNormalizationQueueDescriptor::m_Gamma, BatchNormalizationQueueDescriptor::m_Mean, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and BatchNormalizationQueueDescriptor::m_Variance.

Referenced by TEST_SUITE().

618 {
619  IgnoreUnused(memoryManager);
620  const unsigned int width = 2;
621  const unsigned int height = 3;
622  const unsigned int channels = 5;
623  const unsigned int batchSize = 3;
624 
625  armnn::TensorInfo inputTensorInfo;
626  armnn::TensorInfo outputTensorInfo;
627  armnn::TensorInfo tensorInfo;
628 
629  constexpr unsigned int shape[] = {batchSize, channels, height, width};
630  constexpr unsigned int tensorShape[] = {channels};
631 
632  inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
633  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
634  tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
635 
636  auto input = MakeRandomTensor<float>(inputTensorInfo, 21312);
637 
638  auto mean = MakeRandomTensor<float>(tensorInfo, 123);
639  auto variance = MakeRandomTensor<float>(tensorInfo, 234, 0.0f);
640  auto beta = MakeRandomTensor<float>(tensorInfo, 123);
641  auto gamma = MakeRandomTensor<float>(tensorInfo, 345);
642 
643  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
644  std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());
645 
646  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
647  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
648 
649  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
650  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
651 
652  armnn::BatchNormalizationQueueDescriptor data;
653  armnn::WorkloadInfo info;
654  armnn::ScopedTensorHandle meanTensor(tensorInfo);
655  armnn::ScopedTensorHandle varianceTensor(tensorInfo);
656  armnn::ScopedTensorHandle betaTensor(tensorInfo);
657  armnn::ScopedTensorHandle gammaTensor(tensorInfo);
658 
659  AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
660  AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
661  AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
662  AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
663 
664  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
665  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
666  data.m_Mean = &meanTensor;
667  data.m_Variance = &varianceTensor;
668  data.m_Beta = &betaTensor;
669  data.m_Gamma = &gammaTensor;
670  data.m_Parameters.m_Eps = 0.01f;
671 
672  armnn::BatchNormalizationQueueDescriptor refData = data;
673  armnn::WorkloadInfo refInfo = info;
674  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
675  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
676 
677  std::unique_ptr<armnn::IWorkload> workload
678  = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);
679  std::unique_ptr<armnn::IWorkload> workloadRef
680  = refWorkloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, refData, refInfo);
681 
682  inputHandle->Allocate();
683  outputHandle->Allocate();
684  inputHandleRef->Allocate();
685  outputHandleRef->Allocate();
686 
687  CopyDataToITensorHandle(inputHandle.get(), input.data());
688  CopyDataToITensorHandle(inputHandleRef.get(), input.data());
689 
690  workload->PostAllocationConfigure();
691  workload->Execute();
692  workloadRef->PostAllocationConfigure();
693  workloadRef->Execute();
694 
695  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
696  CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());
697 
698  return LayerTestResult<float, 4>(actualOutput,
699  expectedOutput,
700  outputHandle->GetShape(),
701  outputTensorInfo.GetShape());
702 }
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
const ConstTensorHandle * m_Variance
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
void IgnoreUnused(Ts &&...)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
Definition: Tensor.hpp:196