BatchNormalizationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/backends/IBackendInternal.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = QuantizedVector<T>(inputValues, qScale, qOffset);

    // These values are per-channel of the input.
    auto mean     = QuantizedVector<T>({ 3, -2 }, qScale, qOffset);
    auto variance = QuantizedVector<T>({ 4,  9 }, qScale, qOffset);
    auto beta     = QuantizedVector<T>({ 3,  2 }, qScale, qOffset);
    auto gamma    = QuantizedVector<T>({ 2,  1 }, qScale, qOffset);
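    // Batch normalization computes, independently for each channel c:
    //   out = gamma[c] * (in - mean[c]) / sqrt(variance[c] + epsilon) + beta[c]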

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<T> expectedOutput = QuantizedVector<T>(expectedOutputValues, qScale, qOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedTensorHandle meanTensor(tensorInfo);
    armnn::ScopedTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedTensorHandle betaTensor(tensorInfo);
    armnn::ScopedTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean                    = &meanTensor;
    descriptor.m_Variance                = &varianceTensor;
    descriptor.m_Beta                    = &betaTensor;
    descriptor.m_Gamma                   = &gammaTensor;
    descriptor.m_Parameters.m_Eps        = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, mean.data());
    AllocateAndCopyDataToITensorHandle(&varianceTensor, variance.data());
    AllocateAndCopyDataToITensorHandle(&betaTensor, beta.data());
    AllocateAndCopyDataToITensorHandle(&gammaTensor, gamma.data());

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo tensorInfo({ channels }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = QuantizedVector<T>(
        {
            1.f, 1.f, 4.f, 1.f,
            4.f, 4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        },
        qScale, qOffset);
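    // Same per-channel values as the NCHW test data later in this file, re-laid out
    // as height x width rows of (channel 0, channel 1) pairs to match NHWC.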

    // These values are per-channel of the input.
    auto mean     = QuantizedVector<T>({ 3, -2 }, qScale, qOffset);
    auto variance = QuantizedVector<T>({ 4,  9 }, qScale, qOffset);
    auto beta     = QuantizedVector<T>({ 3,  2 }, qScale, qOffset);
    auto gamma    = QuantizedVector<T>({ 2,  1 }, qScale, qOffset);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle meanTensor(tensorInfo);
    armnn::ScopedTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedTensorHandle betaTensor(tensorInfo);
    armnn::ScopedTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean                    = &meanTensor;
    data.m_Variance                = &varianceTensor;
    data.m_Beta                    = &betaTensor;
    data.m_Gamma                   = &gammaTensor;
    data.m_Parameters.m_Eps        = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to avoid
    // division by zero), multiply by gamma and add beta.
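    // Worked example for the first (channel 0, channel 1) pair, where both inputs are 1:
    //   channel 0: (1 - 3)    / sqrt(4 + 0) * 2 + 3 = 1
    //   channel 1: (1 - (-2)) / sqrt(9 + 0) * 1 + 2 = 3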
    std::vector<T> expectedOutput = QuantizedVector<T>(
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        },
        qScale, qOffset);

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 expectedOutput,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };
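    // With the parameters hard-coded in BatchNormTestImpl (mean 3, variance 4,
    // gamma 2, beta 3 for channel 0), channel 0 reduces to an identity mapping:
    // 2 * (x - 3) / sqrt(4) + 3 = x, so its expected output equals its input.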

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };
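    // With scale 1/20 and offset 50, every value here (and the hard-coded
    // mean/variance/beta/gamma) quantizes exactly: q = 20 * v + 50 is integral
    // and stays within [0, 255].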

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[]       = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };

    inputTensorInfo  = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo       = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float>(inputTensorInfo, 21312);

    auto mean     = MakeRandomTensor<float>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float>(tensorInfo, 234, 0.0f);
    auto beta     = MakeRandomTensor<float>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float>(tensorInfo, 345);
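    // The extra 0.0f bounds the random variance below at zero, so that
    // sqrt(variance + m_Eps) stays well defined.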

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedTensorHandle meanTensor(tensorInfo);
    armnn::ScopedTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedTensorHandle betaTensor(tensorInfo);
    armnn::ScopedTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean              = &meanTensor;
    data.m_Variance          = &varianceTensor;
    data.m_Beta              = &betaTensor;
    data.m_Gamma             = &gammaTensor;
    data.m_Parameters.m_Eps  = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef
        = refWorkloadFactory.CreateWorkload(armnn::LayerType::BatchNormalization, refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    CopyDataToITensorHandle(inputHandleRef.get(), input.data());

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
    CopyDataFromITensorHandle(expectedOutput.data(), outputHandleRef.get());

    return LayerTestResult<float, 4>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}
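
// Illustrative hookup only (assumption: the real registrations live in the
// per-backend test lists such as RefLayerTests.cpp, using the project's
// ARMNN_AUTO_TEST_CASE_WITH_THF macro):
//   ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32,     BatchNormFloat32Test)
//   ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)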