ArmNN
 21.11
L2NormalizationTestImpl.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "L2NormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <numeric>
20 
21 namespace
22 {
23 
24 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
25 LayerTestResult<T, 4> L2NormalizationTestImpl(
26  armnn::IWorkloadFactory& workloadFactory,
28  const armnn::ITensorHandleFactory& tensorHandleFactory,
29  const armnn::TensorShape& inputOutputTensorShape,
30  float scale,
31  int32_t offset,
32  const std::vector<float>& inputValues,
33  float outScale,
34  int32_t outOffset,
35  std::vector<float>& expectedOutputValues,
36  const armnn::DataLayout layout,
37  float epsilon = 1e-12f)
38 {
39  IgnoreUnused(memoryManager);
40  const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
41  const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
42 
43  // at this point if we require it permute the input data
44  const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
45  std::vector<float> inputData = inputValues;
46  if (layout == armnn::DataLayout::NHWC)
47  {
48  std::vector<float> tmp(inputData.size());
49  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
50  inputData = tmp;
51  }
52 
53  auto inputTensor = armnnUtils::QuantizedVector<T>(inputData,
54  inputTensorInfo.GetQuantizationScale(),
55  inputTensorInfo.GetQuantizationOffset());
56 
57  std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
58 
59  if (layout == armnn::DataLayout::NHWC)
60  {
61  std::vector<float> tmp(expectedOutputValues.size());
62  armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputValues.data(), tmp.data(),
63  sizeof(float));
64  expectedOutputValues = tmp;
65  }
66 
67  std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputValues,
68  outputTensorInfo.GetQuantizationScale(),
69  outputTensorInfo.GetQuantizationOffset());
70 
71  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
72  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
73 
75  descriptor.m_Parameters.m_Eps = epsilon;
76  descriptor.m_Parameters.m_DataLayout = layout;
78 
79  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
80  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
81 
82  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
83 
84  inputHandle->Allocate();
85  outputHandle->Allocate();
86 
87  CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
88 
89  workload->PostAllocationConfigure();
90  ExecuteWorkload(*workload, memoryManager);
91 
92  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
93 
94  return LayerTestResult<T, 4>(actualOutput,
95  expectedOutputData,
96  outputHandle->GetShape(),
97  outputTensorInfo.GetShape());
98 }
99 
// Returns the reciprocal of the Euclidean (L2) norm of the given values,
// i.e. 1 / sqrt(sum of squares).
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    float sumOfSquares = 0.0f;
    for (float value : elements)
    {
        sumOfSquares += value * value;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
106 
107 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
108 LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
109  armnn::IWorkloadFactory& workloadFactory,
111  const armnn::ITensorHandleFactory& tensorHandleFactory,
112  float scale,
113  int32_t offset,
114  float outScale,
115  int32_t outOffset,
116  const armnn::DataLayout layout,
117  float epsilon)
118 {
119  // Width: 1
120  // Height: 1
121  // Channels: 3
122  // BatchSize: 1
123  unsigned int numberOfBatches = 1;
124  unsigned int numberOfChannels = 3;
125  unsigned int height = 1;
126  unsigned int width = 1;
127 
128  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
129  numberOfBatches, numberOfChannels, height, width, layout);
130 
131  // 0.0000001^2 + 0.00000002^2 + 0.00000003^2 < 1e-12
132  std::vector<float> inputValues
133  {
134  // Batch 0, Channel 0, Height (1) x Width (1)
135  0.00000001f,
136 
137  // Batch 0, Channel 1, Height (1) x Width (1)
138  0.00000002f,
139 
140  // Batch 0, Channel 2, Height (1) x Width (1)
141  0.00000003f,
142  };
143 
144  const float approxInvL2Norm = 1.f / sqrtf(epsilon);
145  std::vector<float> expectedOutputValues
146  {
147  // Batch 0, Channel 0, Height (1) x Width (1)
148  0.00000001f * approxInvL2Norm,
149  0.00000002f * approxInvL2Norm,
150  0.00000003f * approxInvL2Norm,
151  };
152 
153  return L2NormalizationTestImpl<ArmnnType>(
154  workloadFactory,
155  memoryManager,
156  tensorHandleFactory,
157  inputOutputShape,
158  scale,
159  offset,
160  inputValues,
161  outScale,
162  outOffset,
163  expectedOutputValues,
164  layout,
165  epsilon);
166 }
167 
168 
169 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
170 LayerTestResult<T, 4> L2Normalization1dTestCommon(
171  armnn::IWorkloadFactory& workloadFactory,
173  const armnn::ITensorHandleFactory& tensorHandleFactory,
174  float scale,
175  int32_t offset,
176  float outScale,
177  int32_t outOffset,
178  const armnn::DataLayout layout)
179 {
180  // Width: 1
181  // Height: 1
182  // Channels: 10
183  // BatchSize: 1
184  unsigned int numberOfBatches = 1;
185  unsigned int numberOfChannels = 10;
186  unsigned int height = 1;
187  unsigned int width = 1;
188 
189 
190  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
191  numberOfBatches, numberOfChannels, height, width, layout);
192  std::vector<float> inputValues
193  {
194  // Batch 0, Channel 0, Height (1) x Width (1)
195  1.0f,
196 
197  // Batch 0, Channel 1, Height (1) x Width (1)
198  2.0f,
199 
200  // Batch 0, Channel 2, Height (1) x Width (1)
201  3.0f,
202 
203  // Batch 0, Channel 3, Height (1) x Width (1)
204  4.0f,
205 
206  // Batch 0, Channel 4, Height (1) x Width (1)
207  5.0f,
208 
209  // Batch 0, Channel 5, Height (1) x Width (1)
210  6.0f,
211 
212  // Batch 0, Channel 6, Height (1) x Width (1)
213  7.0f,
214 
215  // Batch 0, Channel 7, Height (1) x Width (1)
216  8.0f,
217 
218  // Batch 0, Channel 8, Height (1) x Width (1)
219  9.0f,
220 
221  // Batch 0, Channel 9, Height (1) x Width (1)
222  10.0f
223  };
224  const float approxInvL2Norm = 0.050964719f;
225  std::vector<float> expectedOutputValues
226  {
227  // Batch 0, Channel 0, Height (1) x Width (1)
228  1.0f * approxInvL2Norm,
229  2.0f * approxInvL2Norm,
230  3.0f * approxInvL2Norm,
231  4.0f * approxInvL2Norm,
232  5.0f * approxInvL2Norm,
233  6.0f * approxInvL2Norm,
234  7.0f * approxInvL2Norm,
235  8.0f * approxInvL2Norm,
236  9.0f * approxInvL2Norm,
237  10.0f * approxInvL2Norm
238  };
239 
240 
241  return L2NormalizationTestImpl<ArmnnType>(
242  workloadFactory,
243  memoryManager,
244  tensorHandleFactory,
245  inputOutputShape,
246  scale,
247  offset,
248  inputValues,
249  outScale,
250  outOffset,
251  expectedOutputValues,
252  layout);
253 }
254 
255 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
256 LayerTestResult<T, 4> L2Normalization2dTestCommon(
257  armnn::IWorkloadFactory& workloadFactory,
259  const armnn::ITensorHandleFactory& tensorHandleFactory,
260  float scale,
261  int32_t offset,
262  float outScale,
263  int32_t outOffset,
264  const armnn::DataLayout layout)
265 {
266  // Width: 5
267  // Height: 1
268  // Channels: 2
269  // BatchSize: 1
270  unsigned int numberOfBatches = 1;
271  unsigned int numberOfChannels = 2;
272  unsigned int height = 1;
273  unsigned int width = 5;
274 
275  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
276  numberOfBatches, numberOfChannels, height, width, layout);
277  std::vector<float> inputValues
278  {
279  // Batch 0, Channel 0, Height (1) x Width (5)
280  1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
281 
282  // Batch 0, Channel 1, Height (1) x Width (5)
283  2.0f, 4.0f, 6.0f, 8.0f, 10.0f
284  };
285  std::vector<float> expectedOutputValues
286  {
287  // Batch 0, Channel 0, Height (1) x Width (5)
288  1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
289  3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
290  5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
291  7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
292  9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
293 
294  // Batch 0, Channel 1, Height (1) x Width (5)
295  2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
296  4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
297  6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
298  8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
299  10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
300  };
301 
302  return L2NormalizationTestImpl<ArmnnType>(
303  workloadFactory,
304  memoryManager,
305  tensorHandleFactory,
306  inputOutputShape,
307  scale,
308  offset,
309  inputValues,
310  outScale,
311  outOffset,
312  expectedOutputValues,
313  layout);
314 }
315 
316 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
317 LayerTestResult<T, 4> L2Normalization3dTestCommon(
318  armnn::IWorkloadFactory& workloadFactory,
320  const armnn::ITensorHandleFactory& tensorHandleFactory,
321  float scale,
322  int32_t offset,
323  float outScale,
324  int32_t outOffset,
325  const armnn::DataLayout layout)
326 {
327  // Width: 3
328  // Height: 4
329  // Channels: 2
330  // BatchSize: 1
331  unsigned int numberOfBatches = 1;
332  unsigned int numberOfChannels = 2;
333  unsigned int height = 4;
334  unsigned int width = 3;
335 
336  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
337  numberOfBatches, numberOfChannels, height, width, layout);
338  std::vector<float> inputValues
339  {
340  // Batch 0, Channel 0, Height (4) x Width (3)
341  119.0f, 21.0f, 150.0f,
342  149.0f, 32.0f, 179.0f,
343  15.0f, 227.0f, 141.0f,
344  147.0f, 199.0f, 220.0f,
345 
346  // Batch 0, Channel 1, Height (4) x Width (3)
347  110.0f, 140.0f, 73.0f,
348  211.0f, 212.0f, 89.0f,
349  24.0f, 138.0f, 188.0f,
350  162.0f, 12.0f, 161.0f
351  };
352  std::vector<float> expectedOutputValues
353  {
354  // Batch 0, Channel 0, Height (4) x Width (3)
355  119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
356  21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
357  150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
358  149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
359  32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
360  179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
361  15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
362  227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
363  141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
364  147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
365  199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
366  220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
367 
368  // Batch 0, Channel 1, Height (4) x Width (3)
369  110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
370  140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
371  73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
372  211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
373  212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
374  89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
375  24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
376  138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
377  188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
378  162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
379  12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
380  161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
381  };
382 
383  return L2NormalizationTestImpl<ArmnnType>(
384  workloadFactory,
385  memoryManager,
386  tensorHandleFactory,
387  inputOutputShape,
388  scale,
389  offset,
390  inputValues,
391  outScale,
392  outOffset,
393  expectedOutputValues,
394  layout);
395 }
396 
397 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
398 LayerTestResult<T, 4> L2Normalization4dTestCommon(
399  armnn::IWorkloadFactory& workloadFactory,
401  const armnn::ITensorHandleFactory& tensorHandleFactory,
402  float scale,
403  int32_t offset,
404  float outScale,
405  int32_t outOffset,
406  const armnn::DataLayout layout)
407 {
408  // Width: 3
409  // Height: 4
410  // Channels: 3
411  // BatchSize: 2
412  unsigned int numberOfBatches = 2;
413  unsigned int numberOfChannels = 3;
414  unsigned int height = 4;
415  unsigned int width = 3;
416 
417  const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
418  numberOfBatches, numberOfChannels, height, width, layout);
419  std::vector<float> inputValues
420  {
421  // Batch 0, Channel 0, Height (4) x Width (3)
422  235.0f, 46.0f, 178.0f,
423  100.0f, 123.0f, 19.0f,
424  172.0f, 74.0f, 250.0f,
425  6.0f, 195.0f, 80.0f,
426 
427  // Batch 0, Channel 1, Height (4) x Width (3)
428  113.0f, 95.0f, 202.0f,
429  77.0f, 114.0f, 71.0f,
430  122.0f, 246.0f, 166.0f,
431  82.0f, 28.0f, 37.0f,
432 
433  // Batch 0, Channel 2, Height (4) x Width (3)
434  56.0f, 170.0f, 162.0f,
435  194.0f, 89.0f, 254.0f,
436  12.0f, 209.0f, 200.0f,
437  1.0f, 64.0f, 54.0f,
438 
439  // Batch 1, Channel 0, Height (4) x Width (3)
440  67.0f, 90.0f, 49.0f,
441  7.0f, 163.0f, 18.0f,
442  25.0f, 117.0f, 103.0f,
443  247.0f, 59.0f, 189.0f,
444 
445  // Batch 1, Channel 1, Height (4) x Width (3)
446  239.0f, 104.0f, 199.0f,
447  17.0f, 124.0f, 153.0f,
448  222.0f, 217.0f, 75.0f,
449  32.0f, 126.0f, 21.0f,
450 
451  // Batch 1, Channel 2, Height (4) x Width (3)
452  97.0f, 145.0f, 215.0f,
453  115.0f, 116.0f, 238.0f,
454  226.0f, 16.0f, 132.0f,
455  92.0f, 125.0f, 88.0f
456  };
457  std::vector<float> expectedOutputValues
458  {
459  // Batch 0, Channel 0, Height (4) x Width (3)
460  235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
461  46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
462  178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
463  100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
464  123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
465  19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
466  172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
467  74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
468  250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
469  6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
470  195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
471  80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
472 
473  // Batch 0, Channel 1, Height (4) x Width (3)
474  113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
475  95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
476  202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
477  77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
478  114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
479  71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
480  122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
481  246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
482  166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
483  82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
484  28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
485  37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
486 
487  // Batch 0, Channel 2, Height (4) x Width (3)
488  56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
489  170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
490  162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
491  194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
492  89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
493  254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
494  12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
495  209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
496  200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
497  1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
498  64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
499  54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
500 
501  // Batch 1, Channel 0, Height (4) x Width (3)
502  67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
503  90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
504  49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
505  7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
506  163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
507  18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
508  25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
509  117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
510  103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
511  247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
512  59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
513  189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
514 
515  // Batch 1, Channel 1, Height (4) x Width (3)
516  239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
517  104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
518  199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
519  17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
520  124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
521  153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
522  222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
523  217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
524  75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
525  32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
526  126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
527  21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
528 
529  // Batch 1, Channel 2, Height (4) x Width (3)
530  97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
531  145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
532  215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
533  115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
534  116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
535  238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
536  226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
537  16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
538  132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
539  92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
540  125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
541  88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
542  };
543 
544  return L2NormalizationTestImpl<ArmnnType>(
545  workloadFactory,
546  memoryManager,
547  tensorHandleFactory,
548  inputOutputShape,
549  scale,
550  offset,
551  inputValues,
552  outScale,
553  outOffset,
554  expectedOutputValues,
555  layout);
556 }
557 
558 } // anonymous namespace
559 
561  armnn::IWorkloadFactory& workloadFactory,
563  const armnn::ITensorHandleFactory& tensorHandleFactory,
564  const armnn::DataLayout layout)
565 {
566  // Dummy descriptor to get the default value of epsilon.
568 
569  return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
570  workloadFactory,
571  memoryManager,
572  tensorHandleFactory,
573  0.f,
574  0,
575  0.f,
576  0,
577  layout,
578  descriptor.m_Eps);
579 }
580 
582  armnn::IWorkloadFactory& workloadFactory,
584  const armnn::ITensorHandleFactory& tensorHandleFactory,
585  const armnn::DataLayout layout)
586 {
587  return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
588  workloadFactory,
589  memoryManager,
590  tensorHandleFactory,
591  0.f,
592  0,
593  0.f,
594  0,
595  layout,
596  1e-9f);
597 }
598 
600  armnn::IWorkloadFactory& workloadFactory,
602  const armnn::ITensorHandleFactory& tensorHandleFactory,
603  const armnn::DataLayout layout)
604 {
605  return L2Normalization1dTestCommon<armnn::DataType::Float32>(
606  workloadFactory,
607  memoryManager,
608  tensorHandleFactory,
609  0.f,
610  0,
611  0.f,
612  0,
613  layout);
614 }
615 
617  armnn::IWorkloadFactory& workloadFactory,
619  const armnn::ITensorHandleFactory& tensorHandleFactory,
620  const armnn::DataLayout layout)
621 {
622  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
623  workloadFactory,
624  memoryManager,
625  tensorHandleFactory,
626  1.f,
627  0,
628  1.f,
629  0,
630  layout);
631 }
632 
634  armnn::IWorkloadFactory& workloadFactory,
636  const armnn::ITensorHandleFactory& tensorHandleFactory,
637  const armnn::DataLayout layout)
638 {
639  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
640  workloadFactory,
641  memoryManager,
642  tensorHandleFactory,
643  1.f,
644  0,
645  1.f / 128,
646  128,
647  layout);
648 }
649 
651  armnn::IWorkloadFactory& workloadFactory,
653  const armnn::ITensorHandleFactory& tensorHandleFactory,
654  const armnn::DataLayout layout)
655 {
656  return L2Normalization2dTestCommon<armnn::DataType::Float32>(
657  workloadFactory,
658  memoryManager,
659  tensorHandleFactory,
660  0.f,
661  0,
662  0.f,
663  0,
664  layout);
665 }
666 
668  armnn::IWorkloadFactory& workloadFactory,
670  const armnn::ITensorHandleFactory& tensorHandleFactory,
671  const armnn::DataLayout layout)
672 {
673  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
674  workloadFactory,
675  memoryManager,
676  tensorHandleFactory,
677  1.f,
678  0,
679  1.f,
680  0,
681  layout);
682 }
683 
685  armnn::IWorkloadFactory& workloadFactory,
687  const armnn::ITensorHandleFactory& tensorHandleFactory,
688  const armnn::DataLayout layout)
689 {
690  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
691  workloadFactory,
692  memoryManager,
693  tensorHandleFactory,
694  1.f,
695  0,
696  1.f / 128,
697  128,
698  layout);
699 }
700 
702  armnn::IWorkloadFactory& workloadFactory,
704  const armnn::ITensorHandleFactory& tensorHandleFactory)
705 {
707  const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
708 
709  std::vector<float> inputData
710  {
711  1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
712  };
713  std::vector<float> expectedOutputData
714  {
715  1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
716  2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
717  3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
718  4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
719  5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
720  6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
721  7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
722  8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
723  9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
724  10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
725  };
726 
727  const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
728  const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
729 
730  std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
731 
732  std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
733  std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
734 
736  descriptor.m_Parameters.m_Eps = 1e-12f;
737  descriptor.m_Parameters.m_DataLayout = layout;
738  armnn::WorkloadInfo info;
739 
740  AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
741  AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
742 
743  std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
744 
745  inputHandle->Allocate();
746  outputHandle->Allocate();
747 
748  CopyDataToITensorHandle(inputHandle.get(), inputData.data());
749 
750  workload->PostAllocationConfigure();
751  ExecuteWorkload(*workload, memoryManager);
752 
753  CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
754 
755  return LayerTestResult<float, 2>(actualOutput,
756  expectedOutputData,
757  outputHandle->GetShape(),
758  outputTensorInfo.GetShape());
759 }
760 
762  armnn::IWorkloadFactory& workloadFactory,
764  const armnn::ITensorHandleFactory& tensorHandleFactory,
765  const armnn::DataLayout layout)
766 {
767  return L2Normalization3dTestCommon<armnn::DataType::Float32>(
768  workloadFactory,
769  memoryManager,
770  tensorHandleFactory,
771  0.f,
772  0,
773  0.f,
774  0,
775  layout);
776 }
777 
779  armnn::IWorkloadFactory& workloadFactory,
781  const armnn::ITensorHandleFactory& tensorHandleFactory,
782  const armnn::DataLayout layout)
783 {
784  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
785  workloadFactory,
786  memoryManager,
787  tensorHandleFactory,
788  1.f,
789  0,
790  1.f,
791  0,
792  layout);
793 }
794 
796  armnn::IWorkloadFactory& workloadFactory,
798  const armnn::ITensorHandleFactory& tensorHandleFactory,
799  const armnn::DataLayout layout)
800 {
801  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
802  workloadFactory,
803  memoryManager,
804  tensorHandleFactory,
805  1.f,
806  0,
807  1.f / 128,
808  128,
809  layout);
810 }
811 
813  armnn::IWorkloadFactory& workloadFactory,
815  const armnn::ITensorHandleFactory& tensorHandleFactory,
816  const armnn::DataLayout layout)
817 {
818  return L2Normalization4dTestCommon<armnn::DataType::Float32>(
819  workloadFactory,
820  memoryManager,
821  tensorHandleFactory,
822  0.f,
823  0,
824  0.f,
825  0,
826  layout);
827 }
828 
830  armnn::IWorkloadFactory& workloadFactory,
832  const armnn::ITensorHandleFactory& tensorHandleFactory,
833  const armnn::DataLayout layout)
834 {
835  return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
836  workloadFactory,
837  memoryManager,
838  tensorHandleFactory,
839  1.f,
840  0,
841  1.f,
842  0,
843  layout);
844 }
845 
847  armnn::IWorkloadFactory& workloadFactory,
849  const armnn::ITensorHandleFactory& tensorHandleFactory,
850  const armnn::DataLayout layout)
851 {
852  return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
853  workloadFactory,
854  memoryManager,
855  tensorHandleFactory,
856  1.f,
857  0,
858  1.f / 128,
859  128,
860  layout);
861 }
float m_Eps
Used to avoid dividing by zero.
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
DataLayout
Definition: Types.hpp:49
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
Definition: Permute.cpp:131
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
A L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
Definition: TensorUtils.cpp:19
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
unsigned int GetNumElements() const
Definition: Tensor.hpp:196
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)