//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

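// Shared implementation for the batch normalization layer tests.
//
// Batch normalization applies, per channel c:
//     output = gamma[c] * (input - mean[c]) / sqrt(variance[c] + epsilon) + beta[c]
//
// The fixed-value tests below all use the same per-channel constants (mean { 3, -2 },
// variance { 4, 9 }, beta { 3, 2 }, gamma { 2, 1 }) and epsilon 0. Channel 0 therefore
// computes 2 * (x - 3) / 2 + 3 = x, an identity mapping, while channel 1 computes
// (x + 2) / 3 + 2; the expected output values encode exactly these two mappings.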
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

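    // Quantize the float reference values into the working type T. For quantized types,
    // QuantizedVector maps each value to (roughly) round(value / qScale) + qOffset,
    // clamped to the representable range; for float types the values pass through
    // unchanged and qScale/qOffset are ignored.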
    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

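    // The queue descriptor carries the layer parameters and the per-channel constant
    // tensors, while WorkloadInfo describes the input and output tensors; together they
    // are everything the factory needs to create a backend-specific workload.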
    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

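// Fixed-size variant of BatchNormTestImpl: a 1x3x2x2 NHWC input with the same
// per-channel constants, and the expected output hardcoded inline rather than
// supplied by the caller.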
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo tensorInfo({channels}, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(
            {
                1.f, 1.f, 4.f, 1.f,
                4.f, 4.f, 2.f, 1.f,
                1.f, -2.f, 6.f, 4.f
            },
            qScale, qOffset));
    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4, 9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, 2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2, 1 }, qScale, qOffset));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
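    // With the constants used here that works out, per element x, to:
    //   channel 0: 2 * (x - 3) / sqrt(4) + 3 = x          (identity)
    //   channel 1: 1 * (x + 2) / sqrt(9) + 2 = (x + 2) / 3 + 2
    // e.g. the -2.f in channel 1 of the last row maps to (-2 + 2) / 3 + 2 = 2.f.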
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(
            {
                1.f, 3.f, 4.f, 3.f,
                4.f, 4.f, 2.f, 3.f,
                1.f, 2.f, 6.f, 4.f
            },
            qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

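// The exported tests below instantiate the implementations above once per data type
// and data layout. Each NCHW/NHWC pair uses the same values, permuted to match the
// layout, so both layouts are expected to produce the same results.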
LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

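    // With scale 1/20 and offset 50, a real value x quantizes to 20 * x + 50, so the
    // test range [-2, 6] maps to [10, 170], comfortably inside the uint8 range.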
    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

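// Runs an identically configured, randomly initialised batch normalization workload
// through both the backend under test and a reference workload factory; the reference
// result is stored as the expected output so the caller can compare the two.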
LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[]       = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

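    // Random input and parameter tensors, seeded so the test is reproducible. The extra
    // 0.0f argument for the variance sets the lower bound of the random range, keeping
    // the generated variances non-negative.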
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

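    // Let both workloads finish any configuration that depends on the now-allocated
    // tensor memory before the first Execute call.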
    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}