ArmNN 21.02
BatchNormalizationTestImpl.cpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

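// Shared implementation used by all the tests below: builds a batch normalization
// workload for the given layout with fixed per-channel parameters (mean {3, -2},
// variance {4, 9}, beta {3, 2}, gamma {2, 1}) and checks the result against
// expectedOutputValues.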
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    IgnoreUnused(memoryManager);
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
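
    // QuantizedVector quantizes as round(value / qScale) + qOffset; with the
    // qScale = 1/20, qOffset = 50 used by the integer tests below, 1.0f maps to 70.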

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean     = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta     = &betaTensor;
    descriptor.m_Gamma    = &gammaTensor;
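    // Eps is added to the variance before the square root; zero is safe here
    // because the test variances (4 and 9) are well away from zero.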
    descriptor.m_Parameters.m_Eps        = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

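// NHWC variant of BatchNormTestImpl: hard-coded 1x3x2x2 (NHWC) input with the same
// per-channel parameters as above.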
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo tensorInfo({channels}, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(
        {
            1.f,  1.f, 4.f, 1.f,
            4.f,  4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        },
        qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean                    = &meanTensor;
    data.m_Variance                = &varianceTensor;
    data.m_Beta                    = &betaTensor;
    data.m_Gamma                   = &gammaTensor;
    data.m_Parameters.m_Eps        = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
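    // e.g. for the first pixel: channel 0 gives ((1 - 3) / sqrt(4 + 0)) * 2 + 3 = 1,
    // and channel 1 gives ((1 - (-2)) / sqrt(9 + 0)) * 1 + 2 = 3.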
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        },
        qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };
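    // With mean {3, -2}, variance {4, 9}, gamma {2, 1}, beta {3, 2} and eps = 0,
    // channel 0 is the identity mapping (((x - 3) / 2) * 2 + 3 = x), while channel 1
    // maps x to (x + 2) / 3 + 2, e.g. 1 -> 3, 4 -> 4, -2 -> 2.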

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };
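    // The float values above are quantized with qScale = 1/20 and qOffset = 50 on the
    // way in, e.g. 1.f -> round(1 / 0.05) + 50 = 70 and -2.f -> 10.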

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QAsymmU8>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f / 20.f, 50, armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Channels: 2
    // Height: 3
    // Width: 2

    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f,
        4.f, 1.f,
        -2.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // BatchSize: 1
    // Height: 3
    // Width: 2
    // Channels: 2

    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f,
        4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f,
        6.f, 4.f
    };
    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f,
        4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f,
        2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f,
        6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QSymmS16>(
        workloadFactory,
        memoryManager,
        tensorHandleFactory,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f / 20.f,
        50,
        armnn::DataLayout::NHWC);
}

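// Runs an identical randomly-initialised batch normalization workload on the backend
// under test and on a reference backend; the caller compares ret.output against
// ret.outputExpected.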
LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[]       = {batchSize, channels, height, width};
    constexpr unsigned int tensorShape[] = {channels};

    inputTensorInfo  = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo       = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
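    // The extra 0.0f argument (MakeRandomTensor's lower bound) keeps the generated
    // variances non-negative.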
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean             = &meanTensor;
    data.m_Variance         = &varianceTensor;
    data.m_Beta             = &betaTensor;
    data.m_Gamma            = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload    = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();
    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();
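
    // ret.output receives the backend-under-test result and ret.outputExpected the
    // reference backend's result; the test harness compares the two for parity.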
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}