// Shared implementation for all L2Normalization layer tests: takes float
// input/expected-output values, quantizes them to T for the target data
// type, optionally permutes NCHW -> NHWC, then builds and executes the
// workload via the supplied factory.
// NOTE(review): this listing is sampled - intermediate source lines are
// elided from view; comments annotate only the visible lines.
22 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Reference data is always supplied as float, regardless of ArmnnType.
30 const std::vector<float>& inputValues,
33 const std::vector<float>& expectedOutputValues,
// Normalization epsilon; 1e-12f presumably matches the descriptor's
// default m_Eps ("used to avoid dividing by zero") - TODO confirm.
35 float epsilon = 1e-12f)
// Input and output share shape and data type; only the quantization
// parameters (scale/offset vs outScale/outOffset) may differ.
38 const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
39 const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
// Mutable copy so the caller's vector stays untouched.
43 std::vector<float> inputData = inputValues;
// Permute the NCHW reference input into NHWC element order when the test
// layout requires it; tmp is scoped to this conversion.
46 std::vector<float> tmp(inputData.size());
47 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(),
sizeof(float));
// Quantize the (possibly permuted) float input using the input tensor's
// quantization parameters.
51 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
52 armnnUtils::QuantizedVector<T>(inputData,
53 inputTensorInfo.GetQuantizationScale(),
54 inputTensorInfo.GetQuantizationOffset()));
// Same copy/permute treatment for the expected output data.
56 std::vector<float> expectedOutputData = expectedOutputValues;
59 std::vector<float> tmp(expectedOutputData.size());
60 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
62 expectedOutputData = tmp;
// Expected output is quantized with the *output* tensor's parameters,
// which may differ from the input's.
66 result.outputExpected =
67 MakeTensor<T, 4>(outputTensorInfo,
68 armnnUtils::QuantizedVector<T>(expectedOutputData,
69 outputTensorInfo.GetQuantizationScale(),
70 outputTensorInfo.GetQuantizationOffset()));
// Backend-specific tensor handles come from the supplied handle factory.
72 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
73 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the handles into the workload descriptor/info.
80 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
81 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
85 inputHandle->Allocate();
86 outputHandle->Allocate();
// Configure after allocation, then run the workload synchronously.
90 workload->PostAllocationConfigure();
91 ExecuteWorkload(*workload, memoryManager);
/// Returns the reciprocal of the L2 norm (1 / sqrt(sum of squares)) of the
/// given values; used to build expected L2Normalization outputs, where each
/// element is scaled by the inverse norm of its normalization group.
float CalcInvL2Norm(std::initializer_list<float> elements)
{
    // Left-to-right fold of the squared magnitudes - same accumulation
    // order (and therefore identical float rounding) as std::accumulate.
    float sumOfSquares = 0.0f;
    for (float element : elements)
    {
        sumOfSquares += element * element;
    }
    return 1.0f / sqrtf(sumOfSquares);
}
// Epsilon-handling test: a 1x3x1x1 tensor whose values are so small that
// the sum of squares is ~0, so the normalization is dominated by epsilon.
// NOTE(review): sampled listing - intermediate lines elided from view.
105 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
121 unsigned int numberOfBatches = 1;
122 unsigned int numberOfChannels = 3;
123 unsigned int height = 1;
124 unsigned int width = 1;
127 numberOfBatches, numberOfChannels, height, width, layout);
130 std::vector<float> inputValues
// With a near-zero sum of squares the inverse norm is approximately
// 1/sqrt(epsilon) - presumably epsilon acts as the divide-by-zero guard
// (see m_Eps) - TODO confirm against the layer implementation.
142 const float approxInvL2Norm = 1.f / sqrtf(epsilon);
143 std::vector<float> expectedOutputValues
146 0.00000001f * approxInvL2Norm,
147 0.00000002f * approxInvL2Norm,
148 0.00000003f * approxInvL2Norm,
151 return L2NormalizationTestImpl<ArmnnType>(
161 expectedOutputValues,
// 1d test: 10 channels of one element each; the whole vector {1..10} forms
// a single normalization group.
// NOTE(review): sampled listing - intermediate lines elided from view.
167 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
182 unsigned int numberOfBatches = 1;
183 unsigned int numberOfChannels = 10;
184 unsigned int height = 1;
185 unsigned int width = 1;
189 numberOfBatches, numberOfChannels, height, width, layout);
190 std::vector<float> inputValues
// 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719.
222 const float approxInvL2Norm = 0.050964719f;
223 std::vector<float> expectedOutputValues
226 1.0f * approxInvL2Norm,
227 2.0f * approxInvL2Norm,
228 3.0f * approxInvL2Norm,
229 4.0f * approxInvL2Norm,
230 5.0f * approxInvL2Norm,
231 6.0f * approxInvL2Norm,
232 7.0f * approxInvL2Norm,
233 8.0f * approxInvL2Norm,
234 9.0f * approxInvL2Norm,
235 10.0f * approxInvL2Norm
239 return L2NormalizationTestImpl<ArmnnType>(
249 expectedOutputValues,
// 2d test: 1x2x1x5 tensor; normalization runs across the 2 channels, so
// each width position w pairs channel-0 value with channel-1 value.
// NOTE(review): sampled listing - intermediate lines elided from view.
253 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
268 unsigned int numberOfBatches = 1;
269 unsigned int numberOfChannels = 2;
270 unsigned int height = 1;
271 unsigned int width = 5;
274 numberOfBatches, numberOfChannels, height, width, layout);
275 std::vector<float> inputValues
278 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
281 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
// Each CalcInvL2Norm argument lists the two channel values that share
// this element's width position.
283 std::vector<float> expectedOutputValues
286 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
287 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
288 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
289 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
290 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
293 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
294 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
295 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
296 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
297 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
300 return L2NormalizationTestImpl<ArmnnType>(
310 expectedOutputValues,
// 3d test: 1x2x4x3 tensor; normalization runs across the 2 channels, so
// each (h, w) position pairs the channel-0 and channel-1 values.
// NOTE(review): sampled listing - intermediate lines elided from view.
314 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
329 unsigned int numberOfBatches = 1;
330 unsigned int numberOfChannels = 2;
331 unsigned int height = 4;
332 unsigned int width = 3;
335 numberOfBatches, numberOfChannels, height, width, layout);
// Input laid out NCHW: channel 0's 4x3 plane, then channel 1's.
336 std::vector<float> inputValues
339 119.0f, 21.0f, 150.0f,
340 149.0f, 32.0f, 179.0f,
341 15.0f, 227.0f, 141.0f,
342 147.0f, 199.0f, 220.0f,
345 110.0f, 140.0f, 73.0f,
346 211.0f, 212.0f, 89.0f,
347 24.0f, 138.0f, 188.0f,
348 162.0f, 12.0f, 161.0f
// Expected: same layout; each CalcInvL2Norm argument lists the two
// channel values that share this element's (h, w) position.
350 std::vector<float> expectedOutputValues
353 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
354 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
355 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
356 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
357 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
358 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
359 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
360 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
361 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
362 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
363 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
364 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
367 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
368 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
369 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
370 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
371 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
372 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
373 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
374 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
375 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
376 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
377 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
378 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
381 return L2NormalizationTestImpl<ArmnnType>(
391 expectedOutputValues,
// 4d test: 2x3x4x3 tensor; normalization runs across the 3 channels, so
// each (n, h, w) position groups one value from each channel.
// NOTE(review): sampled listing - intermediate lines elided from view.
395 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
410 unsigned int numberOfBatches = 2;
411 unsigned int numberOfChannels = 3;
412 unsigned int height = 4;
413 unsigned int width = 3;
416 numberOfBatches, numberOfChannels, height, width, layout);
// Input laid out NCHW: batch 0's three channel planes, then batch 1's.
417 std::vector<float> inputValues
420 235.0f, 46.0f, 178.0f,
421 100.0f, 123.0f, 19.0f,
422 172.0f, 74.0f, 250.0f,
426 113.0f, 95.0f, 202.0f,
427 77.0f, 114.0f, 71.0f,
428 122.0f, 246.0f, 166.0f,
432 56.0f, 170.0f, 162.0f,
433 194.0f, 89.0f, 254.0f,
434 12.0f, 209.0f, 200.0f,
440 25.0f, 117.0f, 103.0f,
441 247.0f, 59.0f, 189.0f,
444 239.0f, 104.0f, 199.0f,
445 17.0f, 124.0f, 153.0f,
446 222.0f, 217.0f, 75.0f,
447 32.0f, 126.0f, 21.0f,
450 97.0f, 145.0f, 215.0f,
451 115.0f, 116.0f, 238.0f,
452 226.0f, 16.0f, 132.0f,
// Expected: same layout; each CalcInvL2Norm argument lists the three
// channel values that share this element's (n, h, w) position.
455 std::vector<float> expectedOutputValues
458 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
459 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
460 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
461 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
462 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
463 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
464 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
465 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
466 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
467 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
468 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
469 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 1: same groups, scaled by the channel-1 member.
472 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
473 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
474 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
475 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
476 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
477 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
478 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
479 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
480 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
481 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
482 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
483 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, channel 2.
486 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
487 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
488 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
489 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
490 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
491 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
492 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
493 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
494 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
495 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
496 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
497 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 1, channel 0.
500 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
501 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
502 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
503 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
504 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
505 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
506 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
507 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
508 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
509 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
510 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
511 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 1.
514 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
515 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
516 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
517 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
518 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
519 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
520 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
521 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
522 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
523 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
524 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
525 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, channel 2.
528 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
529 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
530 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
531 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
532 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
533 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
534 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
535 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
536 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
537 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
538 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
539 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
542 return L2NormalizationTestImpl<ArmnnType>(
552 expectedOutputValues,
// Public test entry points (sampled listing): each forwards to the
// matching *TestCommon with a concrete armnn::DataType.
// Default- and non-default-epsilon Float32 variants share the same common.
567 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
585 return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
// 1d tests: Float32, QSymmS16 and QAsymmU8 all use the 1d common.
603 return L2Normalization1dTestCommon<armnn::DataType::Float32>(
620 return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
637 return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
// 2d Float32 test.
654 return L2Normalization2dTestCommon<armnn::DataType::Float32>(
// 2d quantized test entry points: forward to the 2d common implementation.
// FIX: these previously forwarded to L2Normalization1dTestCommon, so
// L2Normalization2dInt16Test / L2Normalization2dUint8Test exercised the
// wrong (1d) tensor shape and reference data instead of the 2d case their
// names and declarations promise.
671 return L2Normalization2dTestCommon<armnn::DataType::QSymmS16>(
688 return L2Normalization2dTestCommon<armnn::DataType::QAsymmU8>(
// 2d-shape test body (float-only, rank-2 result): values 1..10 are
// normalized in adjacent pairs, matching the expected data below.
// NOTE(review): sampled listing - intermediate lines elided from view.
707 std::vector<float> inputData
709 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f
// Each pair (1,2), (3,4), ... forms one normalization group; both members
// of a pair are scaled by the same inverse norm.
711 std::vector<float> expectedOutputData
713 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
714 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
715 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
716 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
717 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
718 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
719 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
720 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
721 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
722 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
// No quantization here: float data is used directly in rank-2 tensors.
728 auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
731 result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
// Backend tensor handles from the supplied factory.
733 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
734 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
741 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
742 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
// This test builds its workload directly from the workload factory.
744 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateL2Normalization(descriptor, info);
746 inputHandle->Allocate();
747 outputHandle->Allocate();
// Configure after allocation, then run synchronously.
751 workload->PostAllocationConfigure();
752 ExecuteWorkload(*workload, memoryManager);
// 3d test entry points: all three data types use the 3d common.
765 return L2Normalization3dTestCommon<armnn::DataType::Float32>(
// FIX: the Int16 and Uint8 variants previously forwarded to
// L2Normalization1dTestCommon, so L2Normalization3dInt16Test /
// L2Normalization3dUint8Test never exercised the 3d shape and reference
// data their names and declarations promise.
782 return L2Normalization3dTestCommon<armnn::DataType::QSymmS16>(
799 return L2Normalization3dTestCommon<armnn::DataType::QAsymmU8>(
// 4d test entry points: all three data types use the 4d common.
816 return L2Normalization4dTestCommon<armnn::DataType::Float32>(
// FIX: the Int16 and Uint8 variants previously forwarded to
// L2Normalization1dTestCommon, so L2Normalization4dInt16Test /
// L2Normalization4dUint8Test never exercised the 4d (2-batch, 3-channel)
// shape and reference data their names and declarations promise.
833 return L2Normalization4dTestCommon<armnn::DataType::QSymmS16>(
850 return L2Normalization4dTestCommon<armnn::DataType::QAsymmU8>(
float m_Eps
Used to avoid dividing by zero.
virtual std::unique_ptr< IWorkload > CreateL2Normalization(const L2NormalizationQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > L2Normalization4dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
LayerTestResult< float, 4 > L2Normalization3dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void Permute(const armnn::TensorShape &dstShape, const armnn::PermutationVector &mappings, const void *src, void *dst, size_t dataTypeSize)
LayerTestResult< float, 4 > L2Normalization1dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
A L2NormalizationDescriptor for the L2NormalizationLayer.
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 4 > L2Normalization3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 2 > L2Normalization2dShapeTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)
LayerTestResult< uint8_t, 4 > L2Normalization2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > L2NormalizationDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
Contains information about inputs and outputs to a layer.
LayerTestResult< uint8_t, 4 > L2Normalization1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< int16_t, 4 > L2Normalization2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > L2Normalization2dTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
LayerTestResult< float, 4 > L2NormalizationNonDefaultEpsilonTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< uint8_t, 4 > L2Normalization4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory, const armnn::DataLayout layout)