path: root/src/backends/backendsCommon/test/LayerTests.cpp
Diffstat (limited to 'src/backends/backendsCommon/test/LayerTests.cpp')
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.cpp  302
1 file changed, 196 insertions(+), 106 deletions(-)
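Note: the templated test implementation in the first hunk quantises its float reference data through the QuantizedVector helper before building tensors. As a hedged sketch of the affine quantisation involved (the helper name, clamping, and rounding details below are illustrative assumptions, not the library's actual code), an integer value is produced as q = round(v / scale) + offset:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <vector>

// Illustrative sketch only: quantise floats to an integer type T and clamp to its range.
// The real QuantizedVector used by the Arm NN test helpers also passes values through
// unchanged when T is a floating-point type (the Float32 test paths pass qScale = 0).
template <typename T>
std::vector<T> QuantizeSketch(float scale, int32_t offset, const std::vector<float>& values)
{
    std::vector<T> out;
    out.reserve(values.size());
    for (float v : values)
    {
        int32_t q = static_cast<int32_t>(std::round(v / scale)) + offset;
        q = std::max<int32_t>(std::numeric_limits<T>::min(),
                              std::min<int32_t>(std::numeric_limits<T>::max(), q));
        out.push_back(static_cast<T>(q));
    }
    return out;
}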
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index af426a470b..3216ac68ef 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5283,17 +5283,19 @@ LayerTestResult<float, 2> FakeQuantizationTest(
namespace
{
-
-LayerTestResult<float, 4> L2NormalizationTestImpl(
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2NormalizationTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset,
const armnn::TensorShape& inputOutputTensorShape,
const std::vector<float>& inputValues,
const std::vector<float>& expectedOutputValues,
const armnn::DataLayout layout)
{
- const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, qScale, qOffset);
+ const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, qScale, qOffset);
    // At this point, permute the input data if required.
const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
@@ -5305,18 +5307,25 @@ LayerTestResult<float, 4> L2NormalizationTestImpl(
inputData = tmp;
}
- auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputData));
+ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset(),
+ inputData));
- LayerTestResult<float, 4> result(outputTensorInfo);
std::vector<float> expectedOutputData = expectedOutputValues;
if (layout == armnn::DataLayout::NHWC)
{
std::vector<float> tmp(expectedOutputData.size());
- armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC,
- expectedOutputData.data(), tmp.data(), sizeof(float));
+ armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, expectedOutputData.data(), tmp.data(),
+ sizeof(float));
expectedOutputData = tmp;
}
- result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputData));
+
+ LayerTestResult<T, 4> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset(),
+ expectedOutputData));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
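Note: the expected-output tables in the hunks below are built with the CalcInvL2Norm helper defined elsewhere in LayerTests.cpp. As a hedged sketch of what it evaluates (the exact signature is an assumption), it is the reciprocal Euclidean norm of the channel values sharing one spatial position:

#include <cmath>
#include <initializer_list>
#include <numeric>

// Sketch (assumed signature): returns 1 / sqrt(sum of squares), the factor each
// element is multiplied by when L2-normalising along the channel axis.
float CalcInvL2NormSketch(std::initializer_list<float> elements)
{
    const float sumOfSquares = std::accumulate(elements.begin(), elements.end(), 0.0f,
        [](float acc, float v) { return acc + v * v; });
    return 1.0f / std::sqrt(sumOfSquares);
}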
@@ -5786,10 +5795,13 @@ LayerTestResult<float, 4> PadFloat324dTest(
return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-LayerTestResult<float, 4> L2Normalization1dTest(
- armnn::IWorkloadFactory& workloadFactory,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
- const armnn::DataLayout layout)
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization1dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset,
+ const armnn::DataLayout layout)
{
// Width: 1
// Height: 1
@@ -5806,31 +5818,31 @@ LayerTestResult<float, 4> L2Normalization1dTest(
std::vector<float> inputValues
{
// Batch 0, Channel 0, Height (1) x Width (1)
- 1.0f,
+ 1.0f,
// Batch 0, Channel 1, Height (1) x Width (1)
- 2.0f,
+ 2.0f,
// Batch 0, Channel 2, Height (1) x Width (1)
- 3.0f,
+ 3.0f,
// Batch 0, Channel 3, Height (1) x Width (1)
- 4.0f,
+ 4.0f,
// Batch 0, Channel 4, Height (1) x Width (1)
- 5.0f,
+ 5.0f,
// Batch 0, Channel 5, Height (1) x Width (1)
- 6.0f,
+ 6.0f,
// Batch 0, Channel 6, Height (1) x Width (1)
- 7.0f,
+ 7.0f,
// Batch 0, Channel 7, Height (1) x Width (1)
- 8.0f,
+ 8.0f,
// Batch 0, Channel 8, Height (1) x Width (1)
- 9.0f,
+ 9.0f,
// Batch 0, Channel 9, Height (1) x Width (1)
10.0f
@@ -5839,28 +5851,49 @@ LayerTestResult<float, 4> L2Normalization1dTest(
std::vector<float> expectedOutputValues
{
// Batch 0, Channel 0, Height (1) x Width (1)
- 1.0f * approxInvL2Norm,
- 2.0f * approxInvL2Norm,
- 3.0f * approxInvL2Norm,
- 4.0f * approxInvL2Norm,
- 5.0f * approxInvL2Norm,
- 6.0f * approxInvL2Norm,
- 7.0f * approxInvL2Norm,
- 8.0f * approxInvL2Norm,
- 9.0f * approxInvL2Norm,
+ 1.0f * approxInvL2Norm,
+ 2.0f * approxInvL2Norm,
+ 3.0f * approxInvL2Norm,
+ 4.0f * approxInvL2Norm,
+ 5.0f * approxInvL2Norm,
+ 6.0f * approxInvL2Norm,
+ 7.0f * approxInvL2Norm,
+ 8.0f * approxInvL2Norm,
+ 9.0f * approxInvL2Norm,
10.0f * approxInvL2Norm
};
- return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
- inputValues, expectedOutputValues, layout);
+ return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, qScale, qOffset, inputOutputShape,
+ inputValues, expectedOutputValues, layout);
}
-LayerTestResult<float, 4> L2Normalization2dTest(
+
+LayerTestResult<float, 4> L2Normalization1dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
+ return L2Normalization1dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, layout);
+}
+
+LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ return L2Normalization1dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0,
+ layout);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization2dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset,
+ const armnn::DataLayout layout)
+{
// Width: 5
// Height: 1
// Channels: 2
@@ -5883,29 +5916,49 @@ LayerTestResult<float, 4> L2Normalization2dTest(
std::vector<float> expectedOutputValues
{
// Batch 0, Channel 0, Height (1) x Width (5)
- 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
- 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
- 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
- 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
- 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
+ 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
+ 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
+ 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
+ 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
+ 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
// Batch 0, Channel 1, Height (1) x Width (5)
- 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
- 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
- 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
- 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
+ 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
+ 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
+ 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
+ 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
};
- return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
- inputValues, expectedOutputValues, layout);
+ return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, qScale, qOffset, inputOutputShape,
+ inputValues, expectedOutputValues, layout);
}
-LayerTestResult<float, 4> L2Normalization3dTest(
+LayerTestResult<float, 4> L2Normalization2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ return L2Normalization2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, layout);
+}
+
+LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
+ return L2Normalization2dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0,
+ layout);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization3dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset,
+ const armnn::DataLayout layout)
+{
// Width: 3
// Height: 4
// Channels: 2
@@ -5922,25 +5975,25 @@ LayerTestResult<float, 4> L2Normalization3dTest(
// Batch 0, Channel 0, Height (4) x Width (3)
119.0f, 21.0f, 150.0f,
149.0f, 32.0f, 179.0f,
- 15.0f, 227.0f, 141.0f,
+ 15.0f, 227.0f, 141.0f,
147.0f, 199.0f, 220.0f,
// Batch 0, Channel 1, Height (4) x Width (3)
110.0f, 140.0f, 73.0f,
211.0f, 212.0f, 89.0f,
- 24.0f, 138.0f, 188.0f,
+ 24.0f, 138.0f, 188.0f,
162.0f, 12.0f, 161.0f
};
std::vector<float> expectedOutputValues
{
// Batch 0, Channel 0, Height (4) x Width (3)
119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
- 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
+ 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
- 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
+ 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
- 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
+ 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
@@ -5950,28 +6003,48 @@ LayerTestResult<float, 4> L2Normalization3dTest(
// Batch 0, Channel 1, Height (4) x Width (3)
110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
- 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
+ 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
- 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
- 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
+ 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
+ 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
- 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
+ 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
};
- return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
- inputValues, expectedOutputValues, layout);
+ return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, qScale, qOffset, inputOutputShape,
+ inputValues, expectedOutputValues, layout);
}
-LayerTestResult<float, 4> L2Normalization4dTest(
+LayerTestResult<float, 4> L2Normalization3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ return L2Normalization3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, layout);
+}
+
+LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ return L2Normalization3dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0,
+ layout);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> L2Normalization4dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ float qScale,
+ int32_t qOffset,
const armnn::DataLayout layout)
{
- // Width: 3
+ // Width: 3
// Height: 4
// Channels: 3
// BatchSize: 2
@@ -5988,127 +6061,144 @@ LayerTestResult<float, 4> L2Normalization4dTest(
235.0f, 46.0f, 178.0f,
100.0f, 123.0f, 19.0f,
172.0f, 74.0f, 250.0f,
- 6.0f, 195.0f, 80.0f,
+ 6.0f, 195.0f, 80.0f,
// Batch 0, Channel 1, Height (4) x Width (3)
113.0f, 95.0f, 202.0f,
- 77.0f, 114.0f, 71.0f,
+ 77.0f, 114.0f, 71.0f,
122.0f, 246.0f, 166.0f,
- 82.0f, 28.0f, 37.0f,
+ 82.0f, 28.0f, 37.0f,
// Batch 0, Channel 2, Height (4) x Width (3)
- 56.0f, 170.0f, 162.0f,
+ 56.0f, 170.0f, 162.0f,
194.0f, 89.0f, 254.0f,
- 12.0f, 209.0f, 200.0f,
- 1.0f, 64.0f, 54.0f,
+ 12.0f, 209.0f, 200.0f,
+ 1.0f, 64.0f, 54.0f,
// Batch 1, Channel 0, Height (4) x Width (3)
- 67.0f, 90.0f, 49.0f,
- 7.0f, 163.0f, 18.0f,
- 25.0f, 117.0f, 103.0f,
+ 67.0f, 90.0f, 49.0f,
+ 7.0f, 163.0f, 18.0f,
+ 25.0f, 117.0f, 103.0f,
247.0f, 59.0f, 189.0f,
// Batch 1, Channel 1, Height (4) x Width (3)
239.0f, 104.0f, 199.0f,
- 17.0f, 124.0f, 153.0f,
+ 17.0f, 124.0f, 153.0f,
222.0f, 217.0f, 75.0f,
- 32.0f, 126.0f, 21.0f,
+ 32.0f, 126.0f, 21.0f,
// Batch 1, Channel 2, Height (4) x Width (3)
- 97.0f, 145.0f, 215.0f,
+ 97.0f, 145.0f, 215.0f,
115.0f, 116.0f, 238.0f,
226.0f, 16.0f, 132.0f,
- 92.0f, 125.0f, 88.0f
+ 92.0f, 125.0f, 88.0f
};
std::vector<float> expectedOutputValues
{
// Batch 0, Channel 0, Height (4) x Width (3)
235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
- 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
+ 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
- 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
+ 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
- 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
+ 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
- 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
+ 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
- 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
+ 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, Channel 1, Height (4) x Width (3)
113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
- 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
+ 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
- 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
+ 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
- 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
+ 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
- 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
- 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
- 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
+ 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
+ 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
+ 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 0, Channel 2, Height (4) x Width (3)
- 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
+ 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
- 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
+ 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
- 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
+ 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
- 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
- 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
- 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
+ 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
+ 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
+ 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
// Batch 1, Channel 0, Height (4) x Width (3)
- 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
- 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
- 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
- 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
+ 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
+ 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
+ 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
+ 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
- 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
- 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
+ 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
+ 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
- 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
+ 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, Channel 1, Height (4) x Width (3)
239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
- 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
+ 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
- 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
- 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
+ 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
+ 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
- 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
+ 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
// Batch 1, Channel 2, Height (4) x Width (3)
- 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
+ 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
- 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
+ 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
- 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
+ 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
- 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
+ 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
};
- return L2NormalizationTestImpl(workloadFactory, memoryManager, inputOutputShape,
- inputValues, expectedOutputValues, layout);
+ return L2NormalizationTestImpl<ArmnnType>(workloadFactory, memoryManager, qScale, qOffset, inputOutputShape,
+ inputValues, expectedOutputValues, layout);
+}
+
+LayerTestResult<float, 4> L2Normalization4dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ return L2Normalization4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.f, 0, layout);
+}
+
+LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::DataLayout layout)
+{
+ return L2Normalization4dTestCommon<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.f, 0,
+ layout);
}
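Note: for context, a backend test suite would exercise the new QuantisedSymm16 variants through its workload factory. A minimal, hypothetical driver against the reference backend might look like the following; the include paths and the null memory manager are assumptions, not part of this change:

#include <backendsCommon/test/LayerTests.hpp>
#include <reference/RefWorkloadFactory.hpp>

// Hypothetical driver: run one of the new Int16 L2Normalization tests for both data
// layouts. The returned LayerTestResult holds the actual output and the expected
// output tensors, which the real test macros compare element-wise.
int main()
{
    armnn::RefWorkloadFactory factory;
    auto nchw = L2Normalization1dInt16Test(factory, nullptr, armnn::DataLayout::NCHW);
    auto nhwc = L2Normalization1dInt16Test(factory, nullptr, armnn::DataLayout::NHWC);
    return 0;
}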
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>