ArmNN 20.02 - BatchNormalizationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

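// Shared helper for the layer tests below: runs a single BatchNormalization
// workload over a 4D input, using fixed two-channel mean/variance/beta/gamma
// values, and returns the actual and expected outputs for comparison.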
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
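    // (A quantized value q represents the real value qScale * (q - qOffset), so the
    // input, output and per-channel parameter tensors all share one scale/offset here.)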
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

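// NHWC variant of the helper above, with a hard-coded 1x3x2x2 input; the same
// per-channel parameters apply across the innermost (channel) dimension.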
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo tensorInfo({channels}, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

139 
140  auto input = MakeTensor<T, 4>(inputTensorInfo,
141  QuantizedVector<T>(
142  {
143  1.f, 1.f, 4.f, 1.f,
144  4.f, 4.f, 2.f, 1.f,
145  1.f, -2.f, 6.f, 4.f
146  },
147  qScale, qOffset));
148  // These values are per-channel of the input.
149  auto mean = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
150  auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
151  auto beta = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
152  auto gamma = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
153  LayerTestResult<T,4> ret(outputTensorInfo);
154 
155  std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
156  std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
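    // E.g. channel 0 (mean 3, variance 4, gamma 2, beta 3) maps input 1 to
    // ((1 - 3) / sqrt(4 + 0)) * 2 + 3 = 1, and channel 1 (mean -2, variance 9,
    // gamma 1, beta 2) maps input 1 to ((1 - (-2)) / sqrt(9 + 0)) * 1 + 2 = 3.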
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        },
        qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

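// The functions below are the entry points declared in BatchNormalizationTestImpl.hpp.
// Backend test suites typically invoke them through ArmNN's ARMNN_AUTO_TEST_CASE
// macro (test case names vary by backend; the pairing is illustrative, not exhaustive).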
LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels:  2
    // Height:    3
    // Width:     2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height:    3
    // Width:     2
    // Channels:  2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels:  2
    // Height:    3
    // Width:     2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height:    3
    // Width:     2
    // Channels:  2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels:  2
    // Height:    3
    // Width:     2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height:    3
    // Width:     2
    // Channels:  2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels:  2
    // Height:    3
    // Width:     2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height:    3
    // Width:     2
    // Channels:  2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

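// Cross-backend check: runs identical random NCHW data (batch 3, channels 5,
// height 3, width 2, eps 0.01) through the factory under test and through
// refWorkloadFactory; ret.output holds the former result, ret.outputExpected the latter.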
LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[] = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}