ArmNN 22.05 — source listing of L2NormalizationTestImpl.cpp (see the documentation page for this file).
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
7 
9 #include <ResolveType.hpp>
10 
12 #include <armnnUtils/Permute.hpp>
13 
16 
18 
19 #include <numeric>
20 
21 namespace
22 {
23 
24 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
25 LayerTestResult<T, 4> L2NormalizationTestImpl(
26  armnn::IWorkloadFactory& workloadFactory,
28  const armnn::ITensorHandleFactory& tensorHandleFactory,
29  const armnn::TensorShape& inputOutputTensorShape,
30  float scale,
31  int32_t offset,
32  const std::vector<float>& inputValues,
33  float outScale,
34  int32_t outOffset,
35  std::vector<float>& expectedOutputValues,
36  const armnn::DataLayout layout,
37  float epsilon = 1e-12f)
38 {
39  IgnoreUnused(memoryManager);
40  const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
41  const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
42 
43  // at this point if we require it permute the input data
44  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
45  std::vector<float> inputData = inputValues;
46  if (layout == armnn::DataLayout::NHWC)
47  {
48  std::vector<float> tmp(inputData.size());
49  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
50  inputData = tmp;
51  }
52 
53  auto inputTensor = armnnUtils::QuantizedVector<T>(inputData,
54  inputTensorInfo.GetQuantizationScale(),
55  inputTensorInfo.GetQuantizationOffset());
56 
57  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
58 
59  if (layout == armnn::DataLayout::NHWC)
60  {
61  std::vector<float> tmp(expectedOutputValues.size());
62  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputValues.data(), tmp.data(),
63  sizeof(float));
64  expectedOutputValues = tmp;
65  }
66 
67  std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputValues,
68  outputTensorInfo.GetQuantizationScale(),
69  outputTensorInfo.GetQuantizationOffset());
70 
71  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
72  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
73 
75  descriptor.m_Parameters.m_Eps = epsilon;
76  descriptor.m_Parameters.m_DataLayout = layout;
78 
79  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
80  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
81 
82  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::L2Normalization,
83  descriptor,
84  info);
85 
86  inputHandle->Allocate();
87  outputHandle->Allocate();
88 
89  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
90 
91  workload->PostAllocationConfigure();
92  ExecuteWorkload(*workload, memoryManager);
93 
94  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
95 
96  return LayerTestResult<T, 4>(actualOutput,
97  expectedOutputData,
98  outputHandle->GetShape(),
99  outputTensorInfo.GetShape());
100 }
101 
// Returns the reciprocal of the Euclidean (L2) norm of the given values,
// i.e. 1 / sqrt(sum of squares). Used to compute reference outputs.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    float sumOfSquares = 0.0f;
    for (float value : elements)
    {
        sumOfSquares += value * value;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
108 
109 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
110 LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
111  armnn::IWorkloadFactory& workloadFactory,
113  const armnn::ITensorHandleFactory& tensorHandleFactory,
114  float scale,
115  int32_t offset,
116  float outScale,
117  int32_t outOffset,
118  const armnn::DataLayout layout,
119  float epsilon)
120 {
121  // Width: 1
122  // Height: 1
123  // Channels: 3
124  // BatchSize: 1
125  unsigned int numberOfBatches = 1;
126  unsigned int numberOfChannels = 3;
127  unsigned int height = 1;
128  unsigned int width = 1;
129 
130  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
131  numberOfBatches, numberOfChannels, height, width, layout);
132 
133  // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
134  std::vector<float> inputValues
135  {
136  // Batch 0, Channel 0, Height (1) x Width (1)
137  0.00000001f,
138 
139  // Batch 0, Channel 1, Height (1) x Width (1)
140  0.00000002f,
141 
142  // Batch 0, Channel 2, Height (1) x Width (1)
143  0.00000003f,
144  };
145 
146  const float approxInvL2Norm = 1.f / sqrtf(epsilon);
147  std::vector<float> expectedOutputValues
148  {
149  // Batch 0, Channel 0, Height (1) x Width (1)
150  0.00000001f * approxInvL2Norm,
151  0.00000002f * approxInvL2Norm,
152  0.00000003f * approxInvL2Norm,
153  };
154 
155  return L2NormalizationTestImpl<ArmnnType>(
156  workloadFactory,
157  memoryManager,
158  tensorHandleFactory,
159  inputOutputShape,
160  scale,
161  offset,
162  inputValues,
163  outScale,
164  outOffset,
165  expectedOutputValues,
166  layout,
167  epsilon);
168 }
169 
170 
171 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
172 LayerTestResult<T, 4> L2Normalization1dTestCommon(
173  armnn::IWorkloadFactory& workloadFactory,
175  const armnn::ITensorHandleFactory& tensorHandleFactory,
176  float scale,
177  int32_t offset,
178  float outScale,
179  int32_t outOffset,
180  const armnn::DataLayout layout)
181 {
182  // Width: 1
183  // Height: 1
184  // Channels: 10
185  // BatchSize: 1
186  unsigned int numberOfBatches = 1;
187  unsigned int numberOfChannels = 10;
188  unsigned int height = 1;
189  unsigned int width = 1;
190 
191 
192  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
193  numberOfBatches, numberOfChannels, height, width, layout);
194  std::vector<float> inputValues
195  {
196  // Batch 0, Channel 0, Height (1) x Width (1)
197  1.0f,
198 
199  // Batch 0, Channel 1, Height (1) x Width (1)
200  2.0f,
201 
202  // Batch 0, Channel 2, Height (1) x Width (1)
203  3.0f,
204 
205  // Batch 0, Channel 3, Height (1) x Width (1)
206  4.0f,
207 
208  // Batch 0, Channel 4, Height (1) x Width (1)
209  5.0f,
210 
211  // Batch 0, Channel 5, Height (1) x Width (1)
212  6.0f,
213 
214  // Batch 0, Channel 6, Height (1) x Width (1)
215  7.0f,
216 
217  // Batch 0, Channel 7, Height (1) x Width (1)
218  8.0f,
219 
220  // Batch 0, Channel 8, Height (1) x Width (1)
221  9.0f,
222 
223  // Batch 0, Channel 9, Height (1) x Width (1)
224  10.0f
225  };
226  const float approxInvL2Norm = 0.050964719f;
227  std::vector<float> expectedOutputValues
228  {
229  // Batch 0, Channel 0, Height (1) x Width (1)
230  1.0f * approxInvL2Norm,
231  2.0f * approxInvL2Norm,
232  3.0f * approxInvL2Norm,
233  4.0f * approxInvL2Norm,
234  5.0f * approxInvL2Norm,
235  6.0f * approxInvL2Norm,
236  7.0f * approxInvL2Norm,
237  8.0f * approxInvL2Norm,
238  9.0f * approxInvL2Norm,
239  10.0f * approxInvL2Norm
240  };
241 
242 
243  return L2NormalizationTestImpl<ArmnnType>(
244  workloadFactory,
245  memoryManager,
246  tensorHandleFactory,
247  inputOutputShape,
248  scale,
249  offset,
250  inputValues,
251  outScale,
252  outOffset,
253  expectedOutputValues,
254  layout);
255 }
256 
257 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
258 LayerTestResult<T, 4> L2Normalization2dTestCommon(
259  armnn::IWorkloadFactory& workloadFactory,
261  const armnn::ITensorHandleFactory& tensorHandleFactory,
262  float scale,
263  int32_t offset,
264  float outScale,
265  int32_t outOffset,
266  const armnn::DataLayout layout)
267 {
268  // Width: 5
269  // Height: 1
270  // Channels: 2
271  // BatchSize: 1
272  unsigned int numberOfBatches = 1;
273  unsigned int numberOfChannels = 2;
274  unsigned int height = 1;
275  unsigned int width = 5;
276 
277  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
278  numberOfBatches, numberOfChannels, height, width, layout);
279  std::vector<float> inputValues
280  {
281  // Batch 0, Channel 0, Height (1) x Width (5)
282  1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
283 
284  // Batch 0, Channel 1, Height (1) x Width (5)
285  2.0f, 4.0f, 6.0f, 8.0f, 10.0f
286  };
287  std::vector<float> expectedOutputValues
288  {
289  // Batch 0, Channel 0, Height (1) x Width (5)
290  1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
291  3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
292  5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
293  7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
294  9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
295 
296  // Batch 0, Channel 1, Height (1) x Width (5)
297  2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
298  4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
299  6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
300  8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
301  10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
302  };
303 
304  return L2NormalizationTestImpl<ArmnnType>(
305  workloadFactory,
306  memoryManager,
307  tensorHandleFactory,
308  inputOutputShape,
309  scale,
310  offset,
311  inputValues,
312  outScale,
313  outOffset,
314  expectedOutputValues,
315  layout);
316 }
317 
318 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
319 LayerTestResult<T, 4> L2Normalization3dTestCommon(
320  armnn::IWorkloadFactory& workloadFactory,
322  const armnn::ITensorHandleFactory& tensorHandleFactory,
323  float scale,
324  int32_t offset,
325  float outScale,
326  int32_t outOffset,
327  const armnn::DataLayout layout)
328 {
329  // Width: 3
330  // Height: 4
331  // Channels: 2
332  // BatchSize: 1
333  unsigned int numberOfBatches = 1;
334  unsigned int numberOfChannels = 2;
335  unsigned int height = 4;
336  unsigned int width = 3;
337 
338  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
339  numberOfBatches, numberOfChannels, height, width, layout);
340  std::vector<float> inputValues
341  {
342  // Batch 0, Channel 0, Height (4) x Width (3)
343  119.0f, 21.0f, 150.0f,
344  149.0f, 32.0f, 179.0f,
345  15.0f, 227.0f, 141.0f,
346  147.0f, 199.0f, 220.0f,
347 
348  // Batch 0, Channel 1, Height (4) x Width (3)
349  110.0f, 140.0f, 73.0f,
350  211.0f, 212.0f, 89.0f,
351  24.0f, 138.0f, 188.0f,
352  162.0f, 12.0f, 161.0f
353  };
354  std::vector<float> expectedOutputValues
355  {
356  // Batch 0, Channel 0, Height (4) x Width (3)
357  119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
358  21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
359  150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
360  149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
361  32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
362  179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
363  15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
364  227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
365  141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
366  147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
367  199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
368  220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
369 
370  // Batch 0, Channel 1, Height (4) x Width (3)
371  110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
372  140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
373  73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
374  211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
375  212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
376  89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
377  24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
378  138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
379  188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
380  162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
381  12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
382  161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
383  };
384 
385  return L2NormalizationTestImpl<ArmnnType>(
386  workloadFactory,
387  memoryManager,
388  tensorHandleFactory,
389  inputOutputShape,
390  scale,
391  offset,
392  inputValues,
393  outScale,
394  outOffset,
395  expectedOutputValues,
396  layout);
397 }
398 
399 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
400 LayerTestResult<T, 4> L2Normalization4dTestCommon(
401  armnn::IWorkloadFactory& workloadFactory,
403  const armnn::ITensorHandleFactory& tensorHandleFactory,
404  float scale,
405  int32_t offset,
406  float outScale,
407  int32_t outOffset,
408  const armnn::DataLayout layout)
409 {
410  // Width: 3
411  // Height: 4
412  // Channels: 3
413  // BatchSize: 2
414  unsigned int numberOfBatches = 2;
415  unsigned int numberOfChannels = 3;
416  unsigned int height = 4;
417  unsigned int width = 3;
418 
419  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
420  numberOfBatches, numberOfChannels, height, width, layout);
421  std::vector<float> inputValues
422  {
423  // Batch 0, Channel 0, Height (4) x Width (3)
424  235.0f, 46.0f, 178.0f,
425  100.0f, 123.0f, 19.0f,
426  172.0f, 74.0f, 250.0f,
427  6.0f, 195.0f, 80.0f,
428 
429  // Batch 0, Channel 1, Height (4) x Width (3)
430  113.0f, 95.0f, 202.0f,
431  77.0f, 114.0f, 71.0f,
432  122.0f, 246.0f, 166.0f,
433  82.0f, 28.0f, 37.0f,
434 
435  // Batch 0, Channel 2, Height (4) x Width (3)
436  56.0f, 170.0f, 162.0f,
437  194.0f, 89.0f, 254.0f,
438  12.0f, 209.0f, 200.0f,
439  1.0f, 64.0f, 54.0f,
440 
441  // Batch 1, Channel 0, Height (4) x Width (3)
442  67.0f, 90.0f, 49.0f,
443  7.0f, 163.0f, 18.0f,
444  25.0f, 117.0f, 103.0f,
445  247.0f, 59.0f, 189.0f,
446 
447  // Batch 1, Channel 1, Height (4) x Width (3)
448  239.0f, 104.0f, 199.0f,
449  17.0f, 124.0f, 153.0f,
450  222.0f, 217.0f, 75.0f,
451  32.0f, 126.0f, 21.0f,
452 
453  // Batch 1, Channel 2, Height (4) x Width (3)
454  97.0f, 145.0f, 215.0f,
455  115.0f, 116.0f, 238.0f,
456  226.0f, 16.0f, 132.0f,
457  92.0f, 125.0f, 88.0f
458  };
459  std::vector<float> expectedOutputValues
460  {
461  // Batch 0, Channel 0, Height (4) x Width (3)
462  235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
463  46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
464  178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
465  100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
466  123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
467  19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
468  172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
469  74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
470  250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
471  6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
472  195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
473  80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
474 
475  // Batch 0, Channel 1, Height (4) x Width (3)
476  113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
477  95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
478  202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
479  77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
480  114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
481  71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
482  122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
483  246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
484  166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
485  82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
486  28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
487  37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
488 
489  // Batch 0, Channel 2, Height (4) x Width (3)
490  56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
491  170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
492  162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
493  194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
494  89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
495  254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
496  12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
497  209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
498  200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
499  1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
500  64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
501  54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
502 
503  // Batch 1, Channel 0, Height (4) x Width (3)
504  67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
505  90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
506  49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
507  7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
508  163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
509  18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
510  25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
511  117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
512  103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
513  247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
514  59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
515  189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
516 
517  // Batch 1, Channel 1, Height (4) x Width (3)
518  239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
519  104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
520  199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
521  17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
522  124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
523  153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
524  222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
525  217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
526  75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
527  32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
528  126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
529  21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
530 
531  // Batch 1, Channel 2, Height (4) x Width (3)
532  97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
533  145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
534  215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
535  115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
536  116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
537  238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
538  226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
539  16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
540  132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
541  92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
542  125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
543  88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
544  };
545 
546  return L2NormalizationTestImpl<ArmnnType>(
547  workloadFactory,
548  memoryManager,
549  tensorHandleFactory,
550  inputOutputShape,
551  scale,
552  offset,
553  inputValues,
554  outScale,
555  outOffset,
556  expectedOutputValues,
557  layout);
558 }
559 
560 } // anonymous namespace
561 
563  armnn::IWorkloadFactory& workloadFactory,
565  const armnn::ITensorHandleFactory& tensorHandleFactory,
566  const armnn::DataLayout layout)
567 {
568  // Dummy descriptor to get the default value of epsilon.
570 
571  return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
572  workloadFactory,
573  memoryManager,
574  tensorHandleFactory,
575  0.f,
576  0,
577  0.f,
578  0,
579  layout,
580  descriptor.m_Eps);
581 }
582 
584  armnn::IWorkloadFactory& workloadFactory,
586  const armnn::ITensorHandleFactory& tensorHandleFactory,
587  const armnn::DataLayout layout)
588 {
589  return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
590  workloadFactory,
591  memoryManager,
592  tensorHandleFactory,
593  0.f,
594  0,
595  0.f,
596  0,
597  layout,
598  1e-9f);
599 }
600 
602  armnn::IWorkloadFactory& workloadFactory,
604  const armnn::ITensorHandleFactory& tensorHandleFactory,
605  const armnn::DataLayout layout)
606 {
607  return L2Normalization1dTestCommon<armnn::DataType::Float32>(
608  workloadFactory,
609  memoryManager,
610  tensorHandleFactory,
611  0.f,
612  0,
613  0.f,
614  0,
615  layout);
616 }
617 
619  armnn::IWorkloadFactory& workloadFactory,
621  const armnn::ITensorHandleFactory& tensorHandleFactory,
622  const armnn::DataLayout layout)
623 {
624  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
625  workloadFactory,
626  memoryManager,
627  tensorHandleFactory,
628  1.f,
629  0,
630  1.f,
631  0,
632  layout);
633 }
634 
636  armnn::IWorkloadFactory& workloadFactory,
638  const armnn::ITensorHandleFactory& tensorHandleFactory,
639  const armnn::DataLayout layout)
640 {
641  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
642  workloadFactory,
643  memoryManager,
644  tensorHandleFactory,
645  1.f,
646  0,
647  1.f / 128,
648  128,
649  layout);
650 }
651 
653  armnn::IWorkloadFactory& workloadFactory,
655  const armnn::ITensorHandleFactory& tensorHandleFactory,
656  const armnn::DataLayout layout)
657 {
658  return L2Normalization2dTestCommon<armnn::DataType::Float32>(
659  workloadFactory,
660  memoryManager,
661  tensorHandleFactory,
662  0.f,
663  0,
664  0.f,
665  0,
666  layout);
667 }
668 
670  armnn::IWorkloadFactory& workloadFactory,
672  const armnn::ITensorHandleFactory& tensorHandleFactory,
673  const armnn::DataLayout layout)
674 {
675  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
676  workloadFactory,
677  memoryManager,
678  tensorHandleFactory,
679  1.f,
680  0,
681  1.f,
682  0,
683  layout);
684 }
685 
687  armnn::IWorkloadFactory& workloadFactory,
689  const armnn::ITensorHandleFactory& tensorHandleFactory,
690  const armnn::DataLayout layout)
691 {
692  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
693  workloadFactory,
694  memoryManager,
695  tensorHandleFactory,
696  1.f,
697  0,
698  1.f / 128,
699  128,
700  layout);
701 }
702 
704  armnn::IWorkloadFactory& workloadFactory,
706  const armnn::ITensorHandleFactory& tensorHandleFactory)
707 {
709  const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
710 
711  std::vector<float> inputData
712  {
713  1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
714  };
715  std::vector<float> expectedOutputData
716  {
717  1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
718  2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
719  3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
720  4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
721  5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
722  6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
723  7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
724  8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
725  9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
726  10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
727  };
728 
729  const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
730  const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
731 
732  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
733 
734  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
735  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
736 
738  descriptor.m_Parameters.m_Eps = 1e-12f;
739  descriptor.m_Parameters.m_DataLayout = layout;
740  armnn::WorkloadInfo info;
741 
742  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
743  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
744 
745  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::L2Normalization,
746  descriptor,
747  info);
748 
749  inputHandle->Allocate();
750  outputHandle->Allocate();
751 
752  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
753 
754  workload->PostAllocationConfigure();
755  ExecuteWorkload(*workload, memoryManager);
756 
757  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
758 
759  return LayerTestResult<float, 2>(actualOutput,
760  expectedOutputData,
761  outputHandle->GetShape(),
762  outputTensorInfo.GetShape());
763 }
764 
766  armnn::IWorkloadFactory& workloadFactory,
768  const armnn::ITensorHandleFactory& tensorHandleFactory,
769  const armnn::DataLayout layout)
770 {
771  return L2Normalization3dTestCommon<armnn::DataType::Float32>(
772  workloadFactory,
773  memoryManager,
774  tensorHandleFactory,
775  0.f,
776  0,
777  0.f,
778  0,
779  layout);
780 }
781 
783  armnn::IWorkloadFactory& workloadFactory,
785  const armnn::ITensorHandleFactory& tensorHandleFactory,
786  const armnn::DataLayout layout)
787 {
788  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
789  workloadFactory,
790  memoryManager,
791  tensorHandleFactory,
792  1.f,
793  0,
794  1.f,
795  0,
796  layout);
797 }
798 
800  armnn::IWorkloadFactory& workloadFactory,
802  const armnn::ITensorHandleFactory& tensorHandleFactory,
803  const armnn::DataLayout layout)
804 {
805  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
806  workloadFactory,
807  memoryManager,
808  tensorHandleFactory,
809  1.f,
810  0,
811  1.f / 128,
812  128,
813  layout);
814 }
815 
817  armnn::IWorkloadFactory& workloadFactory,
819  const armnn::ITensorHandleFactory& tensorHandleFactory,
820  const armnn::DataLayout layout)
821 {
822  return L2Normalization4dTestCommon<armnn::DataType::Float32>(
823  workloadFactory,
824  memoryManager,
825  tensorHandleFactory,
826  0.f,
827  0,
828  0.f,
829  0,
830  layout);
831 }
832 
834  armnn::IWorkloadFactory& workloadFactory,
836  const armnn::ITensorHandleFactory& tensorHandleFactory,
837  const armnn::DataLayout layout)
838 {
839  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
840  workloadFactory,
841  memoryManager,
842  tensorHandleFactory,
843  1.f,
844  0,
845  1.f,
846  0,
847  layout);
848 }
849 
851  armnn::IWorkloadFactory& workloadFactory,
853  const armnn::ITensorHandleFactory& tensorHandleFactory,
854  const armnn::DataLayout layout)
855 {
856  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
857  workloadFactory,
858  memoryManager,
859  tensorHandleFactory,
860  1.f,
861  0,
862  1.f / 128,
863  128,
864  layout);
865 }
float m_Eps
Used to avoid dividing by zero.
DataLayout
Definition: Types.hpp:62
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
A L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
Definition: TensorUtils.cpp:19
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)