ArmNN
 21.02
BatchNormalizationTestImpl.cpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > BatchNormFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > BatchNormFloat32NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > BatchNormInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< int16_t, 4 > BatchNormInt16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
 
LayerTestResult< float, 4 > CompareBatchNormTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::ITensorHandleFactory &refTensorHandleFactory)
 

Function Documentation

◆ BatchNormFloat16NhwcTest()

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 358 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

362 {
363  // BatchSize: 1
364  // Height: 3
365  // Width: 2
366  // Channels: 2
367 
368  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
369  std::vector<float> inputValues
370  {
371  // Batch 0, Height 0, Width (2) x Channel (2)
372  1.f, 1.f,
373  4.f, 1.f,
374 
375  // Batch 0, Height 1, Width (2) x Channel (2)
376  4.f, 4.f,
377  2.f, 1.f,
378 
379  // Batch 0, Height 2, Width (2) x Channel (2)
380  1.f, -2.f,
381  6.f, 4.f
382  };
383  std::vector<float> expectedOutputValues
384  {
385  // Batch 0, Height 0, Width (2) x Channel (2)
386  1.f, 3.f,
387  4.f, 3.f,
388 
389  // Batch 0, Height 1, Width (2) x Channel (2)
390  4.f, 4.f,
391  2.f, 3.f,
392 
393  // Batch 0, Height 2, Width (2) x Channel (2)
394  1.f, 2.f,
395  6.f, 4.f
396  };
397 
398  return BatchNormTestImpl<armnn::DataType::Float16>(
399  workloadFactory,
400  memoryManager,
401  tensorHandleFactory,
402  inputOutputShape,
403  inputValues,
404  expectedOutputValues,
405  0.f,
406  0,
407  armnn::DataLayout::NHWC);
408 }

◆ BatchNormFloat16Test()

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 310 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

314 {
315  // BatchSize: 1
316  // Channels: 2
317  // Height: 3
318  // Width: 2
319 
320  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
321  std::vector<float> inputValues
322  {
323  // Batch 0, Channel 0, Height (3) x Width (2)
324  1.f, 4.f,
325  4.f, 2.f,
326  1.f, 6.f,
327 
328  // Batch 0, Channel 1, Height (3) x Width (2)
329  1.f, 1.f,
330  4.f, 1.f,
331  -2.f, 4.f
332  };
333  std::vector<float> expectedOutputValues
334  {
335  // Batch 0, Channel 0, Height (3) x Width (2)
336  1.f, 4.f,
337  4.f, 2.f,
338  1.f, 6.f,
339 
340  // Batch 0, Channel 1, Height (3) x Width (2)
341  3.f, 3.f,
342  4.f, 3.f,
343  2.f, 4.f
344  };
345 
346  return BatchNormTestImpl<armnn::DataType::Float16>(
347  workloadFactory,
348  memoryManager,
349  tensorHandleFactory,
350  inputOutputShape,
351  inputValues,
352  expectedOutputValues,
353  0.f,
354  0,
355  armnn::DataLayout::NCHW);
356 }

◆ BatchNormFloat32NhwcTest()

LayerTestResult<float, 4> BatchNormFloat32NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 258 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

262 {
263  // BatchSize: 1
264  // Height: 3
265  // Width: 2
266  // Channels: 2
267 
268  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
269  std::vector<float> inputValues
270  {
271  // Batch 0, Height 0, Width (2) x Channel (2)
272  1.f, 1.f,
273  4.f, 1.f,
274 
275  // Batch 0, Height 1, Width (2) x Channel (2)
276  4.f, 4.f,
277  2.f, 1.f,
278 
279  // Batch 0, Height 2, Width (2) x Channel (2)
280  1.f, -2.f,
281  6.f, 4.f
282  };
283  std::vector<float> expectedOutputValues
284  {
285  // Batch 0, Height 0, Width (2) x Channel (2)
286  1.f, 3.f,
287  4.f, 3.f,
288 
289  // Batch 0, Height 1, Width (2) x Channel (2)
290  4.f, 4.f,
291  2.f, 3.f,
292 
293  // Batch 0, Height 2, Width (2) x Channel (2)
294  1.f, 2.f,
295  6.f, 4.f
296  };
297 
298  return BatchNormTestImpl<armnn::DataType::Float32>(
299  workloadFactory,
300  memoryManager,
301  tensorHandleFactory,
302  inputOutputShape,
303  inputValues,
304  expectedOutputValues,
305  0.f,
306  0,
307  armnn::DataLayout::NHWC);
308 }

◆ BatchNormFloat32Test()

LayerTestResult<float, 4> BatchNormFloat32Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 210 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

214 {
215  // BatchSize: 1
216  // Channels: 2
217  // Height: 3
218  // Width: 2
219 
220  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
221  std::vector<float> inputValues
222  {
223  // Batch 0, Channel 0, Height (3) x Width (2)
224  1.f, 4.f,
225  4.f, 2.f,
226  1.f, 6.f,
227 
228  // Batch 0, Channel 1, Height (3) x Width (2)
229  1.f, 1.f,
230  4.f, 1.f,
231  -2.f, 4.f
232  };
233  std::vector<float> expectedOutputValues
234  {
235  // Batch 0, Channel 0, Height (3) x Width (2)
236  1.f, 4.f,
237  4.f, 2.f,
238  1.f, 6.f,
239 
240  // Batch 0, Channel 1, Height (3) x Width (2)
241  3.f, 3.f,
242  4.f, 3.f,
243  2.f, 4.f
244  };
245 
246  return BatchNormTestImpl<armnn::DataType::Float32>(
247  workloadFactory,
248  memoryManager,
249  tensorHandleFactory,
250  inputOutputShape,
251  inputValues,
252  expectedOutputValues,
253  0.f,
254  0,
255  armnn::DataLayout::NCHW);
256 }

◆ BatchNormInt16NhwcTest()

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 554 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

558 {
559  // BatchSize: 1
560  // Height: 3
561  // Width: 2
562  // Channels: 2
563 
564  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
565  std::vector<float> inputValues
566  {
567  // Batch 0, Height 0, Width (2) x Channel (2)
568  1.f, 1.f,
569  4.f, 1.f,
570 
571  // Batch 0, Height 1, Width (2) x Channel (2)
572  4.f, 4.f,
573  2.f, 1.f,
574 
575  // Batch 0, Height 2, Width (2) x Channel (2)
576  1.f, -2.f,
577  6.f, 4.f
578  };
579  std::vector<float> expectedOutputValues
580  {
581  // Batch 0, Height 0, Width (2) x Channel (2)
582  1.f, 3.f,
583  4.f, 3.f,
584 
585  // Batch 0, Height 1, Width (2) x Channel (2)
586  4.f, 4.f,
587  2.f, 3.f,
588 
589  // Batch 0, Height 2, Width (2) x Channel (2)
590  1.f, 2.f,
591  6.f, 4.f
592  };
593 
594  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
595  workloadFactory,
596  memoryManager,
597  tensorHandleFactory,
598  inputOutputShape,
599  inputValues,
600  expectedOutputValues,
601  1.f / 20.f,
602  50,
603  armnn::DataLayout::NHWC);
604 }

◆ BatchNormInt16Test()

LayerTestResult<int16_t, 4> BatchNormInt16Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 506 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

510 {
511  // BatchSize: 1
512  // Channels: 2
513  // Height: 3
514  // Width: 2
515 
516  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
517  std::vector<float> inputValues
518  {
519  // Batch 0, Channel 0, Height (3) x Width (2)
520  1.f, 4.f,
521  4.f, 2.f,
522  1.f, 6.f,
523 
524  // Batch 0, Channel 1, Height (3) x Width (2)
525  1.f, 1.f,
526  4.f, 1.f,
527  -2.f, 4.f
528  };
529  std::vector<float> expectedOutputValues
530  {
531  // Batch 0, Channel 0, Height (3) x Width (2)
532  1.f, 4.f,
533  4.f, 2.f,
534  1.f, 6.f,
535 
536  // Batch 0, Channel 1, Height (3) x Width (2)
537  3.f, 3.f,
538  4.f, 3.f,
539  2.f, 4.f
540  };
541 
542  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
543  workloadFactory,
544  memoryManager,
545  tensorHandleFactory,
546  inputOutputShape,
547  inputValues,
548  expectedOutputValues,
549  1.f / 20.f,
550  50,
551  armnn::DataLayout::NCHW);
552 }

◆ BatchNormUint8NhwcTest()

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 458 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

462 {
463  // BatchSize: 1
464  // Height: 3
465  // Width: 2
466  // Channels: 2
467 
468  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
469  std::vector<float> inputValues
470  {
471  // Batch 0, Height 0, Width (2) x Channel (2)
472  1.f, 1.f,
473  4.f, 1.f,
474 
475  // Batch 0, Height 1, Width (2) x Channel (2)
476  4.f, 4.f,
477  2.f, 1.f,
478 
479  // Batch 0, Height 2, Width (2) x Channel (2)
480  1.f, -2.f,
481  6.f, 4.f
482  };
483  std::vector<float> expectedOutputValues
484  {
485  // Batch 0, Height 0, Width (2) x Channel (2)
486  1.f, 3.f,
487  4.f, 3.f,
488 
489  // Batch 0, Height 1, Width (2) x Channel (2)
490  4.f, 4.f,
491  2.f, 3.f,
492 
493  // Batch 0, Height 2, Width (2) x Channel (2)
494  1.f, 2.f,
495  6.f, 4.f
496  };
497 
498  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
499  workloadFactory,
500  memoryManager,
501  tensorHandleFactory,
502  inputOutputShape, inputValues, expectedOutputValues,
503  1.f/20.f, 50, armnn::DataLayout::NHWC);
504 }

◆ BatchNormUint8Test()

LayerTestResult<uint8_t, 4> BatchNormUint8Test ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
const armnn::ITensorHandleFactory & tensorHandleFactory 
)

Definition at line 410 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

414 {
415  // BatchSize: 1
416  // Channels: 2
417  // Height: 3
418  // Width: 2
419 
420  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
421  std::vector<float> inputValues
422  {
423  // Batch 0, Channel 0, Height (3) x Width (2)
424  1.f, 4.f,
425  4.f, 2.f,
426  1.f, 6.f,
427 
428  // Batch 0, Channel 1, Height (3) x Width (2)
429  1.f, 1.f,
430  4.f, 1.f,
431  -2.f, 4.f
432  };
433  std::vector<float> expectedOutputValues
434  {
435  // Batch 0, Channel 0, Height (3) x Width (2)
436  1.f, 4.f,
437  4.f, 2.f,
438  1.f, 6.f,
439 
440  // Batch 0, Channel 1, Height (3) x Width (2)
441  3.f, 3.f,
442  4.f, 3.f,
443  2.f, 4.f
444  };
445 
446  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
447  workloadFactory,
448  memoryManager,
449  tensorHandleFactory,
450  inputOutputShape,
451  inputValues,
452  expectedOutputValues,
453  1.f / 20.f,
454  50,
455  armnn::DataLayout::NCHW);
456 }

◆ CompareBatchNormTest()

LayerTestResult<float,4> CompareBatchNormTest ( armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
armnn::IWorkloadFactory & refWorkloadFactory,
const armnn::ITensorHandleFactory & tensorHandleFactory,
const armnn::ITensorHandleFactory & refTensorHandleFactory 
)

Definition at line 606 of file BatchNormalizationTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateBatchNormalization(), ITensorHandleFactory::CreateTensorHandle(), armnn::Float32, armnn::IgnoreUnused(), BatchNormalizationQueueDescriptor::m_Beta, BatchNormalizationDescriptor::m_Eps, BatchNormalizationQueueDescriptor::m_Gamma, BatchNormalizationQueueDescriptor::m_Mean, QueueDescriptorWithParameters< LayerDescriptor >::m_Parameters, and BatchNormalizationQueueDescriptor::m_Variance.

612 {
613  IgnoreUnused(memoryManager);
614  const unsigned int width = 2;
615  const unsigned int height = 3;
616  const unsigned int channels = 5;
617  const unsigned int batchSize = 3;
618 
619  armnn::TensorInfo inputTensorInfo;
620  armnn::TensorInfo outputTensorInfo;
621  armnn::TensorInfo tensorInfo;
622 
623  constexpr unsigned int shape[] = {batchSize, channels, height, width};
624  constexpr unsigned int tensorShape[] = {channels};
625 
626  inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
627  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
628  tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
629 
630  auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
631 
632  auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
633  auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
634  auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
635  auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
636 
637  LayerTestResult<float,4> ret(outputTensorInfo);
638 
639  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
640  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
641 
642  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
643  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
644 
645  armnn::BatchNormalizationQueueDescriptor data;
646  armnn::WorkloadInfo info;
647  armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
648  armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
649  armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
650  armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
651 
652  AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
653  AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
654  AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
655  AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
656 
657  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
658  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
659  data.m_Mean = &meanTensor;
660  data.m_Variance = &varianceTensor;
661  data.m_Beta = &betaTensor;
662  data.m_Gamma = &gammaTensor;
663  data.m_Parameters.m_Eps = 0.01f;
664 
665  armnn::BatchNormalizationQueueDescriptor refData = data;
666  armnn::WorkloadInfo refInfo = info;
667  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
668  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
669 
670  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
671  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
672 
673  inputHandle->Allocate();
674  outputHandle->Allocate();
675  inputHandleRef->Allocate();
676  outputHandleRef->Allocate();
677 
678  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
679  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
680 
681  workload->PostAllocationConfigure();
682  workload->Execute();
683  workloadRef->PostAllocationConfigure();
684  workloadRef->Execute();
685 
686  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
687  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
688 
689  return ret;
690 }
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
const ConstCpuTensorHandle * m_Gamma
const ConstCpuTensorHandle * m_Beta
const ConstCpuTensorHandle * m_Mean
float m_Eps
Value to add to the variance. Used to avoid dividing by zero.
const ConstCpuTensorHandle * m_Variance
void IgnoreUnused(Ts &&...)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
Contains information about inputs and outputs to a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)