// NOTE(review): this chunk is a partial extraction -- the embedded numbers are
// the original file's line numbers and several lines between them are missing
// (function signature, struct setup, closing braces). Comments are hedged.
//
// L2NormalizationTestImpl: shared driver for all L2Normalization layer tests.
// Takes float reference input/expected-output values, quantizes them into
// tensors of ArmnnType, permutes NCHW->NHWC when the test layout requires it,
// builds and runs the workload, and (presumably, in the missing tail) copies
// the result back for comparison -- TODO confirm against the full file.
22 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
29 const std::vector<float>& inputValues,
32 const std::vector<float>& expectedOutputValues,
// Default epsilon matches the layer's documented default (m_Eps = 1e-12,
// "used to avoid dividing by zero" per the descriptor docs).
34 float epsilon = 1e-12f)
// Input and output share the same shape; only quantization params differ.
37 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
38 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
42 std::vector<float> inputData = inputValues;
// Reference data is authored in NCHW; permute into NHWC for that layout.
45 std::vector<float> tmp(inputData.size());
46 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(float));
// Quantize the (possibly permuted) float data into the target data type T.
50 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
51 armnnUtils::QuantizedVector<T>(inputData,
52 inputTensorInfo.GetQuantizationScale(),
53 inputTensorInfo.GetQuantizationOffset()));
55 std::vector<float> expectedOutputData = expectedOutputValues;
// Same permutation for the expected output; using inputTensorInfo's shape is
// valid only because input and output shapes are identical (lines 37/38).
58 std::vector<float> tmp(expectedOutputData.size());
59 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
61 expectedOutputData = tmp;
65 result.outputExpected =
66 MakeTensor<T, 4>(outputTensorInfo,
67 armnnUtils::QuantizedVector<T>(expectedOutputData,
68 outputTensorInfo.GetQuantizationScale(),
69 outputTensorInfo.GetQuantizationOffset()));
71 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
72 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
// Wire tensor handles into the workload descriptor / info pair.
79 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
80 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
84 inputHandle->Allocate();
85 outputHandle->Allocate();
// PostAllocationConfigure must run after Allocate and before execution.
89 workload->PostAllocationConfigure();
90 ExecuteWorkload(*workload, memoryManager);
// CalcInvL2Norm: returns 1 / sqrt(sum of squares of `elements`), i.e. the
// reciprocal of the L2 norm. Test tables multiply each raw value by this to
// build the expected normalized outputs. No epsilon guard here: an all-zero
// element list would divide by zero -- acceptable for hand-written test data.
97 float CalcInvL2Norm(std::initializer_list<float> elements)
99 const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
100 [](
float acc,
float element) {
return acc + element * element; });
101 return 1.0f / sqrtf(reduction);
// L2NormalizationEpsilonTestCommon (fragment -- signature and input-value
// rows are among the missing lines).
// Shape 1x3x1x1: a single 3-element channel vector whose sum of squares is
// far below epsilon, so the implementation is expected to normalize by
// sqrt(epsilon) instead (hence approxInvL2Norm = 1/sqrt(epsilon) below).
104 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
119 unsigned int numberOfBatches = 1;
120 unsigned int numberOfChannels = 3;
121 unsigned int height = 1;
122 unsigned int width = 1;
125 numberOfBatches, numberOfChannels, height, width, layout);
// Input values (tiny, ~1e-8 scale) are in the missing lines 128-138.
128 std::vector<float> inputValues
// Sum of squares << epsilon => norm is clamped to sqrt(epsilon).
140 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
141 std::vector<float> expectedOutputValues
144 0.00000001f * approxInvL2Norm,
145 0.00000002f * approxInvL2Norm,
146 0.00000003f * approxInvL2Norm,
149 return L2NormalizationTestImpl<ArmnnType>(
158 expectedOutputValues,
// L2Normalization1dTestCommon (fragment -- signature and input rows missing).
// Shape 1x10x1x1: normalization runs across the 10 channels of the single
// spatial position. Input is presumably 1..10 (matching the expected values);
// TODO confirm against missing lines 186-216.
164 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
178 unsigned int numberOfBatches = 1;
179 unsigned int numberOfChannels = 10;
180 unsigned int height = 1;
181 unsigned int width = 1;
185 numberOfBatches, numberOfChannels, height, width, layout);
186 std::vector<float> inputValues
// 0.050964719 ~= 1/sqrt(1^2 + 2^2 + ... + 10^2) = 1/sqrt(385).
218 const float approxInvL2Norm = 0.050964719f;
219 std::vector<float> expectedOutputValues
222 1.0f * approxInvL2Norm,
223 2.0f * approxInvL2Norm,
224 3.0f * approxInvL2Norm,
225 4.0f * approxInvL2Norm,
226 5.0f * approxInvL2Norm,
227 6.0f * approxInvL2Norm,
228 7.0f * approxInvL2Norm,
229 8.0f * approxInvL2Norm,
230 9.0f * approxInvL2Norm,
231 10.0f * approxInvL2Norm
235 return L2NormalizationTestImpl<ArmnnType>(
244 expectedOutputValues,
// L2Normalization2dTestCommon (fragment -- signature missing).
// Shape 1x2x1x5: normalization is across the 2 channels at each of the 5
// width positions, visible in the expected values pairing {1,2}, {3,4}, ...
248 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
262 unsigned int numberOfBatches = 1;
263 unsigned int numberOfChannels = 2;
264 unsigned int height = 1;
265 unsigned int width = 5;
268 numberOfBatches, numberOfChannels, height, width, layout);
269 std::vector<float> inputValues
// Channel 0
272 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
// Channel 1
275 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
277 std::vector<float> expectedOutputValues
280 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
281 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
282 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
283 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
284 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
287 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
288 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
289 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
290 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
291 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
294 return L2NormalizationTestImpl<ArmnnType>(
303 expectedOutputValues,
// L2Normalization3dTestCommon (fragment -- signature missing).
// Shape 1x2x4x3: each expected value pairs the two channel values at the
// same (h, w) position, e.g. 119 with 110, 21 with 140, etc.
307 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
321 unsigned int numberOfBatches = 1;
322 unsigned int numberOfChannels = 2;
323 unsigned int height = 4;
324 unsigned int width = 3;
327 numberOfBatches, numberOfChannels, height, width, layout);
328 std::vector<float> inputValues
// Channel 0 (4x3)
331 119.0f, 21.0f, 150.0f,
332 149.0f, 32.0f, 179.0f,
333 15.0f, 227.0f, 141.0f,
334 147.0f, 199.0f, 220.0f,
// Channel 1 (4x3)
337 110.0f, 140.0f, 73.0f,
338 211.0f, 212.0f, 89.0f,
339 24.0f, 138.0f, 188.0f,
340 162.0f, 12.0f, 161.0f
342 std::vector<float> expectedOutputValues
// Channel 0 normalized against channel 1 at the same position.
345 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
346 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
347 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
348 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
349 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
350 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
351 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
352 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
353 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
354 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
355 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
356 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
// Channel 1 normalized against channel 0 at the same position.
359 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
360 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
361 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
362 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
363 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
364 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
365 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
366 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
367 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
368 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
369 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
370 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
373 return L2NormalizationTestImpl<ArmnnType>(
382 expectedOutputValues,
// L2Normalization4dTestCommon (fragment -- signature and several input rows
// are missing from this extraction: expected values reference inputs such as
// 6, 82, 195, 67, 90, 49 that do not appear in the visible input fragment,
// consistent with dropped lines rather than a data mismatch -- TODO confirm
// against the full file).
// Shape 2x3x4x3: each expected value combines the three channel values at
// the same (batch, h, w) position.
386 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
400 unsigned int numberOfBatches = 2;
401 unsigned int numberOfChannels = 3;
402 unsigned int height = 4;
403 unsigned int width = 3;
406 numberOfBatches, numberOfChannels, height, width, layout);
407 std::vector<float> inputValues
// Batch 0, Channel 0 (rows 413-415 missing)
410 235.0f, 46.0f, 178.0f,
411 100.0f, 123.0f, 19.0f,
412 172.0f, 74.0f, 250.0f,
// Batch 0, Channel 1
416 113.0f, 95.0f, 202.0f,
417 77.0f, 114.0f, 71.0f,
418 122.0f, 246.0f, 166.0f,
// Batch 0, Channel 2
422 56.0f, 170.0f, 162.0f,
423 194.0f, 89.0f, 254.0f,
424 12.0f, 209.0f, 200.0f,
// Batch 1 (several rows missing between the visible fragments)
430 25.0f, 117.0f, 103.0f,
431 247.0f, 59.0f, 189.0f,
434 239.0f, 104.0f, 199.0f,
435 17.0f, 124.0f, 153.0f,
436 222.0f, 217.0f, 75.0f,
437 32.0f, 126.0f, 21.0f,
440 97.0f, 145.0f, 215.0f,
441 115.0f, 116.0f, 238.0f,
442 226.0f, 16.0f, 132.0f,
445 std::vector<float> expectedOutputValues
// Batch 0, Channel 0
448 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
449 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
450 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
451 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
452 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
453 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
454 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
455 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
456 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
457 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
458 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
459 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, Channel 1
462 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
463 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
464 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
465 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
466 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
467 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
468 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
469 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
470 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
471 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
472 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
473 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, Channel 2
476 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
477 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
478 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
479 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
480 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
481 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
482 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
483 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
484 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
485 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
486 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
487 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 1, Channel 0
490 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
491 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
492 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
493 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
494 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
495 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
496 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
497 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
498 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
499 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
500 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
501 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, Channel 1
504 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
505 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
506 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
507 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
508 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
509 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
510 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
511 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
512 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
513 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
514 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
515 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, Channel 2
518 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
519 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
520 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
521 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
522 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
523 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
524 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
525 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
526 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
527 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
528 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
529 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
532 return L2NormalizationTestImpl<ArmnnType>(
541 expectedOutputValues,
// Fragments of eight separate thin wrapper functions (their signatures are
// among the missing lines); each simply forwards to a *TestCommon helper.
555 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
571 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
587 return L2Normalization1dTestCommon<armnn::DataType::Float32>(
602 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
617 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
632 return L2Normalization2dTestCommon<armnn::DataType::Float32>(
// NOTE(review): the two calls below (orig lines 647 and 662) invoke
// L2Normalization1dTestCommon, yet they sit directly after the Float32 2d
// wrapper and the file declares L2Normalization2dInt16Test /
// L2Normalization2dUint8Test -- suspected copy-paste defect (should likely
// be L2Normalization2dTestCommon). The enclosing signatures are not visible
// in this extraction, so verify against the full file before fixing.
647 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
662 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
// Interior of L2Normalization2dShapeTest (signature and tensor-info setup are
// among the missing lines; the declaration residue shows it returns
// LayerTestResult<float, 2> and takes no layout parameter).
// Expected values pair consecutive inputs ({1,2}, {3,4}, ...), i.e. the
// normalization runs across the last dimension of a 2D [5, 2]-style shape --
// TODO confirm exact shape against the missing setup lines.
679 std::vector<float> inputData
681 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
683 std::vector<float> expectedOutputData
685 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
686 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
687 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
688 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
689 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
690 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
691 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
692 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
693 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
694 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
// Float path: no quantization, data used directly as 2D tensors.
700 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
703 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
705 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
706 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
713 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
714 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
716 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateL2Normalization(descriptor, info);
718 inputHandle->Allocate();
719 outputHandle->Allocate();
// PostAllocationConfigure must run after Allocate and before execution.
723 workload->PostAllocationConfigure();
724 ExecuteWorkload(*workload, memoryManager);
// Fragments of six more wrapper functions (signatures missing).
736 return L2Normalization3dTestCommon<armnn::DataType::Float32>(
// NOTE(review): orig lines 751/766 and 796/811 call
// L2Normalization1dTestCommon although they follow the Float32 3d and 4d
// wrappers and the file declares L2Normalization3dInt16Test /
// L2Normalization3dUint8Test / L2Normalization4dInt16Test /
// L2Normalization4dUint8Test -- suspected copy-paste defect (likely should
// be the 3d/4d TestCommon helpers). Signatures are not visible here, so
// verify against the full file before fixing.
751 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
766 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
781 return L2Normalization4dTestCommon<armnn::DataType::Float32>(
796 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
811 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
float m_Eps
Used to avoid dividing by zero.
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
A L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)