// Shared driver for every L2 Normalization layer test in this file: quantizes
// the float reference input for the requested ArmnnType, runs an
// L2Normalization workload, and compares the produced output against the
// quantized expected values.
// NOTE(review): this is an extracted listing - the function signature line,
// the DataLayout checks guarding the Permute calls, the workload creation and
// the final LayerTestResult return are not visible in this chunk.
24 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
32 const std::vector<float>& inputValues,
35 std::vector<float>& expectedOutputValues,
// Default epsilon matches the layer's divide-by-zero lower bound (m_Eps).
37 float epsilon = 1e-12f)
// Input and output share one shape; quantization parameters may differ.
40 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
41 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
45 std::vector<float> inputData = inputValues;
// Reference data is authored in NCHW; permute it for NHWC runs.
// (The surrounding layout guard is outside this extract - TODO confirm.)
48 std::vector<float> tmp(inputData.size());
49 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(float));
// Quantize the (possibly permuted) float input into the target type T.
53 auto inputTensor = armnnUtils::QuantizedVector<T>(inputData,
54 inputTensorInfo.GetQuantizationScale(),
55 inputTensorInfo.GetQuantizationOffset());
57 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
// Permute the expected outputs the same way as the inputs.
61 std::vector<float> tmp(expectedOutputValues.size());
62 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputValues.data(), tmp.data(),
64 expectedOutputValues = tmp;
// Quantize the expected reference using the *output* tensor's parameters.
67 std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputValues,
68 outputTensorInfo.GetQuantizationScale(),
69 outputTensorInfo.GetQuantizationOffset());
71 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
72 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Bind the input/output tensors to the queue descriptor.
79 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
80 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
86 inputHandle->Allocate();
87 outputHandle->Allocate();
// Run the workload (created in lines not visible here) and compare shapes.
91 workload->PostAllocationConfigure();
92 ExecuteWorkload(*workload, memoryManager);
98 outputHandle->GetShape(),
99 outputTensorInfo.GetShape());
// Computes 1 / ||elements||_2, the reciprocal of the Euclidean (L2) norm of
// the given values. The tests below use it to build expected outputs: each
// input value is multiplied by the inverse L2 norm of its channel group.
// @param elements The values whose L2 norm is taken (one channel group).
// @return 1 / sqrt(sum of squares of elements).
// NOTE(review): no guard against an all-zero input (division by zero); the
// callers in this file always pass at least one non-zero element.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    // Sum of squares of all elements.
    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float element) { return acc + element * element; });
    // std::sqrt is used (qualified) rather than relying on <cmath> injecting
    // sqrtf into the global namespace, which is implementation-defined.
    return 1.0f / std::sqrt(reduction);
}
// Epsilon test: all input magnitudes are tiny, so the sum of squares falls
// below epsilon and the layer clamps the denominator to sqrt(epsilon).
// Expected outputs are therefore input * 1/sqrt(epsilon).
109 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Shape 1x3x1x1: one pixel whose three channels are normalized together.
125 unsigned int numberOfBatches = 1;
126 unsigned int numberOfChannels = 3;
127 unsigned int height = 1;
128 unsigned int width = 1;
131 numberOfBatches, numberOfChannels, height, width, layout);
// Input values (1e-8 .. 3e-8, per the multipliers below) are on lines not
// visible in this extract - TODO confirm.
134 std::vector<float> inputValues
146 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
147 std::vector<float> expectedOutputValues
150 0.00000001f * approxInvL2Norm,
151 0.00000002f * approxInvL2Norm,
152 0.00000003f * approxInvL2Norm,
155 return L2NormalizationTestImpl<ArmnnType>(
165 expectedOutputValues,
// 1D case: one batch with ten channels (1x10x1x1); every element is
// normalized by the L2 norm of the whole channel vector 1..10.
171 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
186 unsigned int numberOfBatches = 1;
187 unsigned int numberOfChannels = 10;
188 unsigned int height = 1;
189 unsigned int width = 1;
193 numberOfBatches, numberOfChannels, height, width, layout);
194 std::vector<float> inputValues
// 1/sqrt(1^2 + 2^2 + ... + 10^2) = 1/sqrt(385) ~= 0.050964719.
226 const float approxInvL2Norm = 0.050964719f;
227 std::vector<float> expectedOutputValues
230 1.0f * approxInvL2Norm,
231 2.0f * approxInvL2Norm,
232 3.0f * approxInvL2Norm,
233 4.0f * approxInvL2Norm,
234 5.0f * approxInvL2Norm,
235 6.0f * approxInvL2Norm,
236 7.0f * approxInvL2Norm,
237 8.0f * approxInvL2Norm,
238 9.0f * approxInvL2Norm,
239 10.0f * approxInvL2Norm
243 return L2NormalizationTestImpl<ArmnnType>(
253 expectedOutputValues,
// 2D case: shape 1x2x1x5. At each width position the two-channel pair is
// normalized together, e.g. {1,2}, {3,4}, ... {9,10}.
257 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
272 unsigned int numberOfBatches = 1;
273 unsigned int numberOfChannels = 2;
274 unsigned int height = 1;
275 unsigned int width = 5;
278 numberOfBatches, numberOfChannels, height, width, layout);
279 std::vector<float> inputValues
// Channel 0.
282 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
// Channel 1.
285 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
287 std::vector<float> expectedOutputValues
// Channel 0: each value scaled by the inverse norm of its (c0, c1) pair.
290 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
291 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
292 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
293 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
294 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
// Channel 1.
297 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
298 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
299 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
300 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
301 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
304 return L2NormalizationTestImpl<ArmnnType>(
314 expectedOutputValues,
// 3D case: shape 1x2x4x3. Each (h, w) position's two-channel pair is
// normalized together across the channel axis.
318 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
333 unsigned int numberOfBatches = 1;
334 unsigned int numberOfChannels = 2;
335 unsigned int height = 4;
336 unsigned int width = 3;
339 numberOfBatches, numberOfChannels, height, width, layout);
340 std::vector<float> inputValues
// Channel 0, 4x3.
343 119.0f, 21.0f, 150.0f,
344 149.0f, 32.0f, 179.0f,
345 15.0f, 227.0f, 141.0f,
346 147.0f, 199.0f, 220.0f,
// Channel 1, 4x3.
349 110.0f, 140.0f, 73.0f,
350 211.0f, 212.0f, 89.0f,
351 24.0f, 138.0f, 188.0f,
352 162.0f, 12.0f, 161.0f
354 std::vector<float> expectedOutputValues
// Channel 0: each value scaled by the inverse norm of its (c0, c1) pair.
357 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
358 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
359 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
360 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
361 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
362 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
363 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
364 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
365 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
366 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
367 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
368 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
// Channel 1.
371 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
372 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
373 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
374 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
375 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
376 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
377 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
378 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
379 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
380 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
381 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
382 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
385 return L2NormalizationTestImpl<ArmnnType>(
395 expectedOutputValues,
// 4D case: shape 2x3x4x3. Each (n, h, w) position's three-channel triple is
// normalized together across the channel axis.
// NOTE(review): several input rows are missing from this extract (e.g. the
// batch-0 values 6/82/1... and all batch-1 inputs referenced by the expected
// outputs below) - extraction gaps, TODO confirm against the full file.
399 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
414 unsigned int numberOfBatches = 2;
415 unsigned int numberOfChannels = 3;
416 unsigned int height = 4;
417 unsigned int width = 3;
420 numberOfBatches, numberOfChannels, height, width, layout);
421 std::vector<float> inputValues
// Batch 0, channel 0.
424 235.0f, 46.0f, 178.0f,
425 100.0f, 123.0f, 19.0f,
426 172.0f, 74.0f, 250.0f,
// Batch 0, channel 1.
430 113.0f, 95.0f, 202.0f,
431 77.0f, 114.0f, 71.0f,
432 122.0f, 246.0f, 166.0f,
// Batch 0, channel 2.
436 56.0f, 170.0f, 162.0f,
437 194.0f, 89.0f, 254.0f,
438 12.0f, 209.0f, 200.0f,
// Batch 1 (partial in this extract).
444 25.0f, 117.0f, 103.0f,
445 247.0f, 59.0f, 189.0f,
448 239.0f, 104.0f, 199.0f,
449 17.0f, 124.0f, 153.0f,
450 222.0f, 217.0f, 75.0f,
451 32.0f, 126.0f, 21.0f,
454 97.0f, 145.0f, 215.0f,
455 115.0f, 116.0f, 238.0f,
456 226.0f, 16.0f, 132.0f,
459 std::vector<float> expectedOutputValues
// Batch 0, channel 0: scaled by the inverse norm of the (c0, c1, c2) triple.
462 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
463 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
464 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
465 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
466 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
467 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
468 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
469 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
470 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
471 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
472 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
473 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 1.
476 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
477 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
478 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
479 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
480 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
481 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
482 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
483 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
484 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
485 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
486 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
487 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 2.
490 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
491 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
492 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
493 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
494 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
495 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
496 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
497 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
498 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
499 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
500 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
501 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 1, channel 0.
504 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
505 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
506 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
507 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
508 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
509 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
510 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
511 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
512 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
513 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
514 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
515 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 1.
518 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
519 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
520 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
521 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
522 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
523 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
524 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
525 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
526 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
527 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
528 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
529 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 2.
532 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
533 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
534 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
535 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
536 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
537 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
538 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
539 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
540 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
541 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
542 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
543 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
546 return L2NormalizationTestImpl<ArmnnType>(
556 expectedOutputValues,
// Public test entry points (extract): each forwards to the matching
// *TestCommon helper with the requested data type.
// Presumably L2NormalizationDefaultEpsilonTest - TODO confirm.
571 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
// Presumably L2NormalizationNonDefaultEpsilonTest - TODO confirm.
589 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
// L2Normalization1dTest (Float32).
607 return L2Normalization1dTestCommon<armnn::DataType::Float32>(
// L2Normalization1dInt16Test.
624 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
// L2Normalization1dUint8Test.
641 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
// L2Normalization2dTest (Float32).
658 return L2Normalization2dTestCommon<armnn::DataType::Float32>(
// L2Normalization2dInt16Test: the QSymmS16 variant must exercise the same 2D
// pattern (1x2x1x5) as the Float32 L2Normalization2dTest directly above.
// Fix: previously dispatched to L2Normalization1dTestCommon (copy-paste from
// the 1d tests), which runs the unrelated 1x10x1x1 case.
675 return L2Normalization2dTestCommon<armnn::DataType::QSymmS16>(
// L2Normalization2dUint8Test: the QAsymmU8 variant must exercise the same 2D
// pattern (1x2x1x5) as the Float32 L2Normalization2dTest above.
// Fix: previously dispatched to L2Normalization1dTestCommon (copy-paste from
// the 1d tests), which runs the unrelated 1x10x1x1 case.
692 return L2Normalization2dTestCommon<armnn::DataType::QAsymmU8>(
// Presumably the body of L2Normalization2dShapeTest (see the declaration in
// the tooltip list below): normalizes consecutive pairs of 1..10, i.e. a
// rank-2 tensor whose last axis holds the pairs - TODO confirm.
711 std::vector<float> inputData
713 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
715 std::vector<float> expectedOutputData
// Each value is scaled by the inverse norm of its consecutive pair.
717 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
718 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
719 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
720 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
721 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
722 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
723 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
724 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
725 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
726 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
732 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
// Create tensor handles, bind them and run the workload (same flow as
// L2NormalizationTestImpl above; workload creation is outside this extract).
734 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
735 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
742 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
743 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
749 inputHandle->Allocate();
750 outputHandle->Allocate();
754 workload->PostAllocationConfigure();
755 ExecuteWorkload(*workload, memoryManager);
761 outputHandle->GetShape(),
// L2Normalization3dTest (Float32): forwards to the 3D common helper.
771 return L2Normalization3dTestCommon<armnn::DataType::Float32>(
// L2Normalization3dInt16Test: the QSymmS16 variant must exercise the same 3D
// pattern (1x2x4x3) as the Float32 L2Normalization3dTest directly above.
// Fix: previously dispatched to L2Normalization1dTestCommon (copy-paste from
// the 1d tests), which runs the unrelated 1x10x1x1 case.
788 return L2Normalization3dTestCommon<armnn::DataType::QSymmS16>(
// L2Normalization3dUint8Test: the QAsymmU8 variant must exercise the same 3D
// pattern (1x2x4x3) as the Float32 L2Normalization3dTest above.
// Fix: previously dispatched to L2Normalization1dTestCommon (copy-paste from
// the 1d tests), which runs the unrelated 1x10x1x1 case.
805 return L2Normalization3dTestCommon<armnn::DataType::QAsymmU8>(
// L2Normalization4dTest (Float32): forwards to the 4D common helper.
822 return L2Normalization4dTestCommon<armnn::DataType::Float32>(
// L2Normalization4dInt16Test: the QSymmS16 variant must exercise the same 4D
// pattern (2x3x4x3) as the Float32 L2Normalization4dTest directly above.
// Fix: previously dispatched to L2Normalization1dTestCommon (copy-paste from
// the 1d tests), which runs the unrelated 1x10x1x1 case.
839 return L2Normalization4dTestCommon<armnn::DataType::QSymmS16>(
// L2Normalization4dUint8Test: the QAsymmU8 variant must exercise the same 4D
// pattern (2x3x4x3) as the Float32 L2Normalization4dTest above.
// Fix: previously dispatched to L2Normalization1dTestCommon (copy-paste from
// the 1d tests), which runs the unrelated 1x10x1x1 case.
856 return L2Normalization4dTestCommon<armnn::DataType::QAsymmU8>(
float m_Eps
Used to avoid dividing by zero.
const TensorShape & GetShape() const
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
A L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about TensorInfos of a layer.
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
unsigned int GetNumElements() const
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)