ArmNN
 20.08
BatchNormalizationTestImpl.cpp File Reference

Go to the source code of this file.

Functions

LayerTestResult< float, 4 > BatchNormFloat32Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > BatchNormFloat32NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< armnn::Half, 4 > BatchNormFloat16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< uint8_t, 4 > BatchNormUint8NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > BatchNormInt16Test (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< int16_t, 4 > BatchNormInt16NhwcTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
 
LayerTestResult< float, 4 > CompareBatchNormTest (armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, armnn::IWorkloadFactory &refWorkloadFactory)
 

Function Documentation

◆ BatchNormFloat16NhwcTest()

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 353 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

356 {
357  // BatchSize: 1
358  // Height: 3
359  // Width: 2
360  // Channels: 2
361 
362  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
363  std::vector<float> inputValues
364  {
365  // Batch 0, Height 0, Width (2) x Channel (2)
366  1.f, 1.f,
367  4.f, 1.f,
368 
369  // Batch 0, Height 1, Width (2) x Channel (2)
370  4.f, 4.f,
371  2.f, 1.f,
372 
373  // Batch 0, Height 2, Width (2) x Channel (2)
374  1.f, -2.f,
375  6.f, 4.f
376  };
377  std::vector<float> expectedOutputValues
378  {
379  // Batch 0, Height 0, Width (2) x Channel (2)
380  1.f, 3.f,
381  4.f, 3.f,
382 
383  // Batch 0, Height 1, Width (2) x Channel (2)
384  4.f, 4.f,
385  2.f, 3.f,
386 
387  // Batch 0, Height 2, Width (2) x Channel (2)
388  1.f, 2.f,
389  6.f, 4.f
390  };
391 
392  return BatchNormTestImpl<armnn::DataType::Float16>(
393  workloadFactory,
394  memoryManager,
395  inputOutputShape,
396  inputValues,
397  expectedOutputValues,
398  0.f,
399  0,
400  armnn::DataLayout::NHWC);
401 }

◆ BatchNormFloat16Test()

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 307 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

310 {
311  // BatchSize: 1
312  // Channels: 2
313  // Height: 3
314  // Width: 2
315 
316  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
317  std::vector<float> inputValues
318  {
319  // Batch 0, Channel 0, Height (3) x Width (2)
320  1.f, 4.f,
321  4.f, 2.f,
322  1.f, 6.f,
323 
324  // Batch 0, Channel 1, Height (3) x Width (2)
325  1.f, 1.f,
326  4.f, 1.f,
327  -2.f, 4.f
328  };
329  std::vector<float> expectedOutputValues
330  {
331  // Batch 0, Channel 0, Height (3) x Width (2)
332  1.f, 4.f,
333  4.f, 2.f,
334  1.f, 6.f,
335 
336  // Batch 0, Channel 1, Height (3) x Width (2)
337  3.f, 3.f,
338  4.f, 3.f,
339  2.f, 4.f
340  };
341 
342  return BatchNormTestImpl<armnn::DataType::Float16>(
343  workloadFactory,
344  memoryManager,
345  inputOutputShape,
346  inputValues,
347  expectedOutputValues,
348  0.f,
349  0,
350  armnn::DataLayout::NCHW);
351 }

◆ BatchNormFloat32NhwcTest()

LayerTestResult<float, 4> BatchNormFloat32NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 257 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

260 {
261  // BatchSize: 1
262  // Height: 3
263  // Width: 2
264  // Channels: 2
265 
266  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
267  std::vector<float> inputValues
268  {
269  // Batch 0, Height 0, Width (2) x Channel (2)
270  1.f, 1.f,
271  4.f, 1.f,
272 
273  // Batch 0, Height 1, Width (2) x Channel (2)
274  4.f, 4.f,
275  2.f, 1.f,
276 
277  // Batch 0, Height 2, Width (2) x Channel (2)
278  1.f, -2.f,
279  6.f, 4.f
280  };
281  std::vector<float> expectedOutputValues
282  {
283  // Batch 0, Height 0, Width (2) x Channel (2)
284  1.f, 3.f,
285  4.f, 3.f,
286 
287  // Batch 0, Height 1, Width (2) x Channel (2)
288  4.f, 4.f,
289  2.f, 3.f,
290 
291  // Batch 0, Height 2, Width (2) x Channel (2)
292  1.f, 2.f,
293  6.f, 4.f
294  };
295 
296  return BatchNormTestImpl<armnn::DataType::Float32>(
297  workloadFactory,
298  memoryManager,
299  inputOutputShape,
300  inputValues,
301  expectedOutputValues,
302  0.f,
303  0,
304  armnn::DataLayout::NHWC);
305 }

◆ BatchNormFloat32Test()

LayerTestResult<float, 4> BatchNormFloat32Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 211 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

214 {
215  // BatchSize: 1
216  // Channels: 2
217  // Height: 3
218  // Width: 2
219 
220  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
221  std::vector<float> inputValues
222  {
223  // Batch 0, Channel 0, Height (3) x Width (2)
224  1.f, 4.f,
225  4.f, 2.f,
226  1.f, 6.f,
227 
228  // Batch 0, Channel 1, Height (3) x Width (2)
229  1.f, 1.f,
230  4.f, 1.f,
231  -2.f, 4.f
232  };
233  std::vector<float> expectedOutputValues
234  {
235  // Batch 0, Channel 0, Height (3) x Width (2)
236  1.f, 4.f,
237  4.f, 2.f,
238  1.f, 6.f,
239 
240  // Batch 0, Channel 1, Height (3) x Width (2)
241  3.f, 3.f,
242  4.f, 3.f,
243  2.f, 4.f
244  };
245 
246  return BatchNormTestImpl<armnn::DataType::Float32>(
247  workloadFactory,
248  memoryManager,
249  inputOutputShape,
250  inputValues,
251  expectedOutputValues,
252  0.f,
253  0,
254  armnn::DataLayout::NCHW);
255 }

◆ BatchNormInt16NhwcTest()

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 541 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

544 {
545  // BatchSize: 1
546  // Height: 3
547  // Width: 2
548  // Channels: 2
549 
550  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
551  std::vector<float> inputValues
552  {
553  // Batch 0, Height 0, Width (2) x Channel (2)
554  1.f, 1.f,
555  4.f, 1.f,
556 
557  // Batch 0, Height 1, Width (2) x Channel (2)
558  4.f, 4.f,
559  2.f, 1.f,
560 
561  // Batch 0, Height 2, Width (2) x Channel (2)
562  1.f, -2.f,
563  6.f, 4.f
564  };
565  std::vector<float> expectedOutputValues
566  {
567  // Batch 0, Height 0, Width (2) x Channel (2)
568  1.f, 3.f,
569  4.f, 3.f,
570 
571  // Batch 0, Height 1, Width (2) x Channel (2)
572  4.f, 4.f,
573  2.f, 3.f,
574 
575  // Batch 0, Height 2, Width (2) x Channel (2)
576  1.f, 2.f,
577  6.f, 4.f
578  };
579 
580  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
581  workloadFactory,
582  memoryManager,
583  inputOutputShape,
584  inputValues,
585  expectedOutputValues,
586  1.f / 20.f,
587  50,
588  armnn::DataLayout::NHWC);
589 }

◆ BatchNormInt16Test()

LayerTestResult<int16_t, 4> BatchNormInt16Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 495 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

498 {
499  // BatchSize: 1
500  // Channels: 2
501  // Height: 3
502  // Width: 2
503 
504  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
505  std::vector<float> inputValues
506  {
507  // Batch 0, Channel 0, Height (3) x Width (2)
508  1.f, 4.f,
509  4.f, 2.f,
510  1.f, 6.f,
511 
512  // Batch 0, Channel 1, Height (3) x Width (2)
513  1.f, 1.f,
514  4.f, 1.f,
515  -2.f, 4.f
516  };
517  std::vector<float> expectedOutputValues
518  {
519  // Batch 0, Channel 0, Height (3) x Width (2)
520  1.f, 4.f,
521  4.f, 2.f,
522  1.f, 6.f,
523 
524  // Batch 0, Channel 1, Height (3) x Width (2)
525  3.f, 3.f,
526  4.f, 3.f,
527  2.f, 4.f
528  };
529 
530  return BatchNormTestImpl<armnn::DataType::QSymmS16>(
531  workloadFactory,
532  memoryManager,
533  inputOutputShape,
534  inputValues,
535  expectedOutputValues,
536  1.f / 20.f,
537  50,
538  armnn::DataLayout::NCHW);
539 }

◆ BatchNormUint8NhwcTest()

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 449 of file BatchNormalizationTestImpl.cpp.

References armnn::NHWC.

452 {
453  // BatchSize: 1
454  // Height: 3
455  // Width: 2
456  // Channels: 2
457 
458  const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
459  std::vector<float> inputValues
460  {
461  // Batch 0, Height 0, Width (2) x Channel (2)
462  1.f, 1.f,
463  4.f, 1.f,
464 
465  // Batch 0, Height 1, Width (2) x Channel (2)
466  4.f, 4.f,
467  2.f, 1.f,
468 
469  // Batch 0, Height 2, Width (2) x Channel (2)
470  1.f, -2.f,
471  6.f, 4.f
472  };
473  std::vector<float> expectedOutputValues
474  {
475  // Batch 0, Height 0, Width (2) x Channel (2)
476  1.f, 3.f,
477  4.f, 3.f,
478 
479  // Batch 0, Height 1, Width (2) x Channel (2)
480  4.f, 4.f,
481  2.f, 3.f,
482 
483  // Batch 0, Height 2, Width (2) x Channel (2)
484  1.f, 2.f,
485  6.f, 4.f
486  };
487 
488  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
489  workloadFactory,
490  memoryManager,
491  inputOutputShape, inputValues, expectedOutputValues,
492  1.f/20.f, 50, armnn::DataLayout::NHWC);
493 }

◆ BatchNormUint8Test()

LayerTestResult<uint8_t, 4> BatchNormUint8Test ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager 
)

Definition at line 403 of file BatchNormalizationTestImpl.cpp.

References armnn::NCHW.

406 {
407  // BatchSize: 1
408  // Channels: 2
409  // Height: 3
410  // Width: 2
411 
412  const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
413  std::vector<float> inputValues
414  {
415  // Batch 0, Channel 0, Height (3) x Width (2)
416  1.f, 4.f,
417  4.f, 2.f,
418  1.f, 6.f,
419 
420  // Batch 0, Channel 1, Height (3) x Width (2)
421  1.f, 1.f,
422  4.f, 1.f,
423  -2.f, 4.f
424  };
425  std::vector<float> expectedOutputValues
426  {
427  // Batch 0, Channel 0, Height (3) x Width (2)
428  1.f, 4.f,
429  4.f, 2.f,
430  1.f, 6.f,
431 
432  // Batch 0, Channel 1, Height (3) x Width (2)
433  3.f, 3.f,
434  4.f, 3.f,
435  2.f, 4.f
436  };
437 
438  return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
439  workloadFactory,
440  memoryManager,
441  inputOutputShape,
442  inputValues,
443  expectedOutputValues,
444  1.f / 20.f,
445  50,
446  armnn::DataLayout::NCHW);
447 }

◆ CompareBatchNormTest()

LayerTestResult<float,4> CompareBatchNormTest ( armnn::IWorkloadFactory workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr memoryManager,
armnn::IWorkloadFactory refWorkloadFactory 
)

Definition at line 591 of file BatchNormalizationTestImpl.cpp.

References AllocateAndCopyDataToITensorHandle(), ARMNN_NO_DEPRECATE_WARN_BEGIN, ARMNN_NO_DEPRECATE_WARN_END, CopyDataFromITensorHandle(), CopyDataToITensorHandle(), IWorkloadFactory::CreateBatchNormalization(), IWorkloadFactory::CreateTensorHandle(), armnn::Float32, and armnn::IgnoreUnused().

595 {
596  IgnoreUnused(memoryManager);
597  const unsigned int width = 2;
598  const unsigned int height = 3;
599  const unsigned int channels = 5;
600  const unsigned int batchSize = 3;
601 
602  armnn::TensorInfo inputTensorInfo;
603  armnn::TensorInfo outputTensorInfo;
604  armnn::TensorInfo tensorInfo;
605 
606  constexpr unsigned int shape[] = {batchSize, channels, height, width};
607  constexpr unsigned int tensorShape[] = {channels};
608 
609  inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
610  outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
611  tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
612 
613  auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
614 
615  auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
616  auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
617  auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
618  auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);
619 
620  LayerTestResult<float,4> ret(outputTensorInfo);
621 
622  ARMNN_NO_DEPRECATE_WARN_BEGIN
623  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
624  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
625 
626  std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
627  std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
628  ARMNN_NO_DEPRECATE_WARN_END
629 
630  armnn::BatchNormalizationQueueDescriptor data;
631  armnn::WorkloadInfo info;
632  armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
633  armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
634  armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
635  armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
636 
637  AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
638  AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
639  AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
640  AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
641 
642  AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
643  AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
644  data.m_Mean = &meanTensor;
645  data.m_Variance = &varianceTensor;
646  data.m_Beta = &betaTensor;
647  data.m_Gamma = &gammaTensor;
648  data.m_Parameters.m_Eps = 0.01f;
649 
650  armnn::BatchNormalizationQueueDescriptor refData = data;
651  armnn::WorkloadInfo refInfo = info;
652  SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
653  SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
654 
655  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
656  std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
657 
658  inputHandle->Allocate();
659  outputHandle->Allocate();
660  inputHandleRef->Allocate();
661  outputHandleRef->Allocate();
662 
663  CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
664  CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
665 
666  workload->PostAllocationConfigure();
667  workload->Execute();
668  workloadRef->PostAllocationConfigure();
669  workloadRef->Execute();
670 
671  CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
672  CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
673 
674  return ret;
675 }
virtual std::unique_ptr< IWorkload > CreateBatchNormalization(const BatchNormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
Contains information about inputs and outputs to a layer.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)