// L2NormalizationTestImpl (fragment): shared driver for all L2 Normalization
// layer tests. It quantizes the float reference input / expected-output
// values, permutes them NCHW->NHWC when the requested data layout demands it,
// builds an L2Normalization workload, executes it, and records the expected
// result for comparison.
// NOTE(review): this listing is an extraction fragment — several original
// lines (full parameter list, layout check, result copy-back) are missing.
22 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Reference (float) input values and the values the layer should produce.
29 const std::vector<float>& inputValues,
32 const std::vector<float>& expectedOutputValues,
// Epsilon forwarded to the layer; used to avoid dividing by zero.
34 float epsilon = 1e-12f)
// Input and output share one shape but carry independent quantization params.
37 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
38 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
// Mutable copy so the NHWC permutation can replace the data.
42 std::vector<float> inputData = inputValues;
45 std::vector<float> tmp(inputData.size());
46 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(float));
// Quantize the (possibly permuted) input to the target element type T.
50 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
51 armnnUtils::QuantizedVector<T>(inputData,
52 inputTensorInfo.GetQuantizationScale(),
53 inputTensorInfo.GetQuantizationOffset()));
// Same treatment (permute + quantize) for the expected output.
55 std::vector<float> expectedOutputData = expectedOutputValues;
58 std::vector<float> tmp(expectedOutputData.size());
// NOTE(review): permutes with inputTensorInfo's shape — safe only because
// input and output were constructed from the same inputOutputTensorShape.
59 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
61 expectedOutputData = tmp;
65 result.outputExpected =
66 MakeTensor<T, 4>(outputTensorInfo,
67 armnnUtils::QuantizedVector<T>(expectedOutputData,
68 outputTensorInfo.GetQuantizationScale(),
69 outputTensorInfo.GetQuantizationOffset()));
// Create backend tensor handles and wire them into the workload descriptor.
72 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
73 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
78 descriptor.m_Parameters.m_DataLayout = layout;
81 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
82 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
// Allocate device-side memory, then configure and run the workload.
86 inputHandle->Allocate();
87 outputHandle->Allocate();
91 workload->PostAllocationConfigure();
92 ExecuteWorkload(*workload, memoryManager);
/// Returns 1 / ||elements||_2 — the reciprocal of the Euclidean (L2) norm of
/// the given values. The test helpers multiply each input element by this
/// factor to build the expected L2-normalization outputs.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    // Accumulate the sum of squares in the same left-to-right order the
    // original std::accumulate-based implementation used.
    float sumOfSquares = 0.0f;
    for (const float element : elements)
    {
        sumOfSquares += element * element;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
// L2NormalizationEpsilonTestCommon (fragment): 1x3x1x1 tensor whose tiny
// values make the sum of squares fall below epsilon, so the layer is expected
// to divide by sqrt(epsilon) rather than the true (underflowed) norm.
106 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Single batch, three channels, 1x1 spatial extent.
121 unsigned int numberOfBatches = 1;
122 unsigned int numberOfChannels = 3;
123 unsigned int height = 1;
124 unsigned int width = 1;
127 numberOfBatches, numberOfChannels, height, width, layout);
130 std::vector<float> inputValues
// Expected scale factor: with sum-of-squares < epsilon the denominator is
// clamped, so every element is effectively divided by sqrt(epsilon).
142 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
143 std::vector<float> expectedOutputValues
146 0.00000001f * approxInvL2Norm,
147 0.00000002f * approxInvL2Norm,
148 0.00000003f * approxInvL2Norm,
// Delegate to the shared driver with the values built above.
151 return L2NormalizationTestImpl<ArmnnType>(
160 expectedOutputValues,
// L2Normalization1dTestCommon (fragment): 1x10x1x1 tensor, so the whole
// channel vector {1..10} is normalized as a single group.
166 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
180 unsigned int numberOfBatches = 1;
181 unsigned int numberOfChannels = 10;
182 unsigned int height = 1;
183 unsigned int width = 1;
187 numberOfBatches, numberOfChannels, height, width, layout);
188 std::vector<float> inputValues
// 0.050964719 ~= 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385),
// i.e. the inverse L2 norm of the input vector {1..10}.
220 const float approxInvL2Norm = 0.050964719f;
221 std::vector<float> expectedOutputValues
224 1.0f * approxInvL2Norm,
225 2.0f * approxInvL2Norm,
226 3.0f * approxInvL2Norm,
227 4.0f * approxInvL2Norm,
228 5.0f * approxInvL2Norm,
229 6.0f * approxInvL2Norm,
230 7.0f * approxInvL2Norm,
231 8.0f * approxInvL2Norm,
232 9.0f * approxInvL2Norm,
233 10.0f * approxInvL2Norm
// Delegate to the shared driver.
237 return L2NormalizationTestImpl<ArmnnType>(
246 expectedOutputValues,
// L2Normalization2dTestCommon (fragment): 1x2x1x5 tensor; each of the five
// spatial positions is normalized across the two channels, e.g. position 0
// pairs 1.0 (channel 0) with 2.0 (channel 1).
250 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
264 unsigned int numberOfBatches = 1;
265 unsigned int numberOfChannels = 2;
266 unsigned int height = 1;
267 unsigned int width = 5;
270 numberOfBatches, numberOfChannels, height, width, layout);
271 std::vector<float> inputValues
// Channel 0 followed by channel 1 (NCHW ordering of the reference data).
274 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
277 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
279 std::vector<float> expectedOutputValues
// Channel 0 outputs: each value scaled by the inverse norm of its pair.
282 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
283 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
284 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
285 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
286 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
// Channel 1 outputs.
289 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
290 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
291 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
292 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
293 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
// Delegate to the shared driver.
296 return L2NormalizationTestImpl<ArmnnType>(
305 expectedOutputValues,
// L2Normalization3dTestCommon (fragment): 1x2x4x3 tensor; every (h, w)
// position is normalized across the two channels.
309 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
323 unsigned int numberOfBatches = 1;
324 unsigned int numberOfChannels = 2;
325 unsigned int height = 4;
326 unsigned int width = 3;
329 numberOfBatches, numberOfChannels, height, width, layout);
330 std::vector<float> inputValues
// Channel 0 (4 rows x 3 columns).
333 119.0f, 21.0f, 150.0f,
334 149.0f, 32.0f, 179.0f,
335 15.0f, 227.0f, 141.0f,
336 147.0f, 199.0f, 220.0f,
// Channel 1.
339 110.0f, 140.0f, 73.0f,
340 211.0f, 212.0f, 89.0f,
341 24.0f, 138.0f, 188.0f,
342 162.0f, 12.0f, 161.0f
344 std::vector<float> expectedOutputValues
// Channel 0 outputs: each value scaled by the inverse norm of the
// cross-channel pair at the same (h, w) position.
347 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
348 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
349 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
350 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
351 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
352 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
353 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
354 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
355 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
356 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
357 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
358 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
// Channel 1 outputs.
361 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
362 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
363 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
364 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
365 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
366 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
367 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
368 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
369 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
370 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
371 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
372 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
// Delegate to the shared driver.
375 return L2NormalizationTestImpl<ArmnnType>(
384 expectedOutputValues,
// L2Normalization4dTestCommon (fragment): 2x3x4x3 tensor; every (n, h, w)
// position is normalized across the three channels.
// NOTE(review): this extraction is missing some input lines (e.g. the last
// row of batch 0's channels and parts of batch 1) — the expected-output
// expressions below reference values (6, 82, 1, 195, ...) from those
// missing rows.
388 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
402 unsigned int numberOfBatches = 2;
403 unsigned int numberOfChannels = 3;
404 unsigned int height = 4;
405 unsigned int width = 3;
408 numberOfBatches, numberOfChannels, height, width, layout);
409 std::vector<float> inputValues
// Batch 0, channel 0 (rows partially visible in this extract).
412 235.0f, 46.0f, 178.0f,
413 100.0f, 123.0f, 19.0f,
414 172.0f, 74.0f, 250.0f,
// Batch 0, channel 1.
418 113.0f, 95.0f, 202.0f,
419 77.0f, 114.0f, 71.0f,
420 122.0f, 246.0f, 166.0f,
// Batch 0, channel 2.
424 56.0f, 170.0f, 162.0f,
425 194.0f, 89.0f, 254.0f,
426 12.0f, 209.0f, 200.0f,
// Batch 1, channel 0 (partially visible).
432 25.0f, 117.0f, 103.0f,
433 247.0f, 59.0f, 189.0f,
// Batch 1, channel 1.
436 239.0f, 104.0f, 199.0f,
437 17.0f, 124.0f, 153.0f,
438 222.0f, 217.0f, 75.0f,
439 32.0f, 126.0f, 21.0f,
// Batch 1, channel 2 (partially visible).
442 97.0f, 145.0f, 215.0f,
443 115.0f, 116.0f, 238.0f,
444 226.0f, 16.0f, 132.0f,
447 std::vector<float> expectedOutputValues
// Batch 0, channel 0: each value scaled by the inverse norm of the
// three-channel triple at the same position.
450 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
451 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
452 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
453 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
454 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
455 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
456 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
457 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
458 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
459 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
460 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
461 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 1.
464 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
465 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
466 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
467 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
468 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
469 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
470 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
471 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
472 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
473 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
474 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
475 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 2.
478 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
479 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
480 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
481 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
482 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
483 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
484 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
485 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
486 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
487 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
488 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
489 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 1, channel 0.
492 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
493 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
494 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
495 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
496 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
497 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
498 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
499 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
500 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
501 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
502 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
503 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 1.
506 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
507 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
508 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
509 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
510 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
511 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
512 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
513 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
514 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
515 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
516 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
517 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 2.
520 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
521 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
522 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
523 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
524 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
525 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
526 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
527 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
528 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
529 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
530 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
531 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
// Delegate to the shared driver.
534 return L2NormalizationTestImpl<ArmnnType>(
543 expectedOutputValues,
// Public epsilon-test entry points (fragments): both forward to the epsilon
// common helper for Float32; the differing call arguments (default vs
// non-default epsilon, per the Doxygen declarations) are outside this extract.
557 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
573 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
// 1d test entry points: Float32, Int16 and Uint8 variants all forward to the
// 1d common helper — correct for all three.
589 return L2Normalization1dTestCommon<armnn::DataType::Float32>(
604 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
619 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
// 2d Float32 entry point forwards to the 2d common helper.
634 return L2Normalization2dTestCommon<armnn::DataType::Float32>(
// BUG FIX: this is the body of L2Normalization2dInt16Test (see declaration),
// but it previously forwarded to L2Normalization1dTestCommon — a copy-paste
// from the 1d tests — so the Int16 2d shape was never exercised. Forward to
// the 2d common helper, matching the Float32 2d variant.
649 return L2Normalization2dTestCommon<armnn::DataType::QSymmS16>(
// BUG FIX: body of L2Normalization2dUint8Test previously forwarded to the 1d
// common helper (copy-paste error); forward to the 2d helper so the Uint8
// test actually exercises the 2d shape.
664 return L2Normalization2dTestCommon<armnn::DataType::QAsymmU8>(
// L2Normalization2dShapeTest (fragment): runs the layer on a genuinely 2D
// tensor (float, not quantized). The expected values pair adjacent elements
// ({1,2}, {3,4}, ...), i.e. normalization over the last dimension —
// presumably a {5, 2} shape; the TensorInfo construction is outside this
// extract.
681 std::vector<float> inputData
683 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
685 std::vector<float> expectedOutputData
687 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
688 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
689 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
690 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
691 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
692 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
693 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
694 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
695 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
696 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
// No quantization here — the data is used as float directly.
702 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
705 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
// Create tensor handles and wire them into the workload descriptor.
708 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
709 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
714 descriptor.m_Parameters.m_DataLayout = layout;
717 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
718 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
// Build the workload, allocate device memory, configure and execute.
720 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateL2Normalization(descriptor, info);
722 inputHandle->Allocate();
723 outputHandle->Allocate();
727 workload->PostAllocationConfigure();
728 ExecuteWorkload(*workload, memoryManager);
// 3d Float32 entry point forwards to the 3d common helper.
740 return L2Normalization3dTestCommon<armnn::DataType::Float32>(
// BUG FIX: body of L2Normalization3dInt16Test previously forwarded to the 1d
// common helper (copy-paste error); forward to the 3d helper so the Int16
// test actually exercises the 3d shape, matching the Float32 3d variant.
755 return L2Normalization3dTestCommon<armnn::DataType::QSymmS16>(
// BUG FIX: body of L2Normalization3dUint8Test previously forwarded to the 1d
// common helper (copy-paste error); forward to the 3d helper so the Uint8
// test actually exercises the 3d shape.
770 return L2Normalization3dTestCommon<armnn::DataType::QAsymmU8>(
// 4d Float32 entry point forwards to the 4d common helper.
785 return L2Normalization4dTestCommon<armnn::DataType::Float32>(
// BUG FIX: body of L2Normalization4dInt16Test previously forwarded to the 1d
// common helper (copy-paste error); forward to the 4d helper so the Int16
// test actually exercises the 4d shape, matching the Float32 4d variant.
800 return L2Normalization4dTestCommon<armnn::DataType::QSymmS16>(
// BUG FIX: body of L2Normalization4dUint8Test previously forwarded to the 1d
// common helper (copy-paste error); forward to the 4d helper so the Uint8
// test actually exercises the 4d shape.
815 return L2Normalization4dTestCommon<armnn::DataType::QAsymmU8>(
float m_Eps
Used to avoid dividing by zero.
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
#define ARMNN_NO_DEPRECATE_WARN_END
An L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
Contains information about inputs and outputs to a layer.
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::DataLayout layout)