author    Matteo Martincigh <matteo.martincigh@arm.com>  2018-10-01 09:26:39 +0100
committer Matthew Bentham <matthew.bentham@arm.com>      2018-10-10 16:16:58 +0100
commit    539b44dbd620c9f793f84933c1bcc51ce3ff085e (patch)
tree      4d75b13cd8ac213f16279ecf54b2ce9c20d04b9a
parent    084523a492efd8001cd8e07956d45c9aeb2bb046 (diff)
download  armnn-539b44dbd620c9f793f84933c1bcc51ce3ff085e.tar.gz
IVGCVSW-1863 Unit tests for NHWC L2Normalization
* Added NHWC unit test implementation
* Programmatically selected the channel dimension when creating the ACL
  Normalization layer info
* Set the input/output data layout in the constructor of the L2Normalization
  workload

Change-Id: Ie69f1a360022c29d1a3a3808c1f26b69243fa8f9
-rw-r--r--  src/backends/aclCommon/ArmComputeUtils.hpp                        |   6
-rw-r--r--  src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp      |  14
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp  |  11
-rw-r--r--  src/backends/test/ArmComputeCl.cpp                                |   6
-rw-r--r--  src/backends/test/ArmComputeNeon.cpp                              |  13
-rw-r--r--  src/backends/test/BatchNormTestImpl.hpp                           |   7
-rw-r--r--  src/backends/test/ClContextControlFixture.hpp                     |  17
-rw-r--r--  src/backends/test/LayerTests.cpp                                  | 593
-rw-r--r--  src/backends/test/LayerTests.hpp                                  |   5
-rw-r--r--  src/backends/test/Reference.cpp                                   |   8

10 files changed, 496 insertions, 184 deletions
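
The crux of the patch, visible in the first hunk below: the depth (channel)
dimension is no longer hard-coded to index 1 but is chosen from the tensor
shape according to the data layout. A minimal standalone sketch of the idea,
with illustrative stand-ins (GetChannelCount and Shape4d are not Arm NN names):

    #include <array>
    #include <cassert>

    enum class DataLayout { NCHW, NHWC };
    using Shape4d = std::array<unsigned int, 4>;

    // NCHW stores a 4D shape as [N, C, H, W], so channels sit at index 1;
    // NHWC stores it as [N, H, W, C], so channels sit at index 3.
    unsigned int GetChannelCount(const Shape4d& shape, DataLayout layout)
    {
        const unsigned int channelIndex = (layout == DataLayout::NCHW) ? 1u : 3u;
        return shape[channelIndex];
    }

    int main()
    {
        // The same logical single-batch, 10-channel, 1x1 tensor in both layouts,
        // matching the shapes used by the 1d tests in this patch:
        assert(GetChannelCount({ 1, 10, 1, 1 }, DataLayout::NCHW) == 10u);
        assert(GetChannelCount({ 1, 1, 1, 10 }, DataLayout::NHWC) == 10u);
        return 0;
    }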
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index db472964ea..ec3701bb35 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -15,9 +15,11 @@ namespace armnn
{
inline arm_compute::NormalizationLayerInfo
-CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo)
+CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
+ armnn::DataLayout dataLayout)
{
- const unsigned int depth = tensorInfo.GetShape()[1];
+ unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
+ const unsigned int depth = tensorInfo.GetShape()[depthDimension];
// At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
// L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
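
For reference, the operation exercised by these workloads normalizes each
spatial position across the channel axis; in LaTeX, with the sum running over
channels c':

    \text{out}_{n,c,h,w} = \frac{\text{in}_{n,c,h,w}}{\sqrt{\sum_{c'} \text{in}_{n,c',h,w}^{2}}}

This is exactly what the expected values in the unit tests below encode: each
input value is multiplied by CalcInvL2Norm over the channel values at the same
(n, h, w) position.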
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index edc13bcfea..f84801601a 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -22,7 +22,7 @@ arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
arm_compute::NormalizationLayerInfo normalizationInfo =
- CreateAclNormalizationLayerInfoForL2Normalization(input);
+ CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
return arm_compute::CLNormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}
@@ -35,7 +35,14 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0]));
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ m_Layer.configure(&input, &output,
+ CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
+ m_Data.m_Parameters.m_DataLayout));
}
void ClL2NormalizationFloatWorkload::Execute() const
@@ -45,6 +52,3 @@ void ClL2NormalizationFloatWorkload::Execute() const
}
} //namespace armnn
-
-
-
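
The CL workload above and the Neon workload below share the same configure
flow: convert the Arm NN layout to its ACL equivalent, stamp it onto both
tensor infos, then build the layer info with the layout-aware helper. A
self-contained mock of that flow, with stand-in types (MockTensorInfo and both
enums are illustrative, not the real ACL API):

    #include <cassert>

    enum class ArmnnDataLayout { NCHW, NHWC };
    enum class AclDataLayout   { NCHW, NHWC };

    struct MockTensorInfo
    {
        AclDataLayout layout{ AclDataLayout::NCHW };
        void set_data_layout(AclDataLayout l) { layout = l; }
    };

    AclDataLayout ConvertDataLayout(ArmnnDataLayout l)
    {
        return (l == ArmnnDataLayout::NCHW) ? AclDataLayout::NCHW : AclDataLayout::NHWC;
    }

    // Mirrors the constructor bodies: the layout is set on the tensor infos
    // before the ACL layer is configured, so the kernels pick the right axis.
    void PropagateLayout(MockTensorInfo& input, MockTensorInfo& output, ArmnnDataLayout layout)
    {
        const AclDataLayout aclLayout = ConvertDataLayout(layout);
        input.set_data_layout(aclLayout);
        output.set_data_layout(aclLayout);
    }

    int main()
    {
        MockTensorInfo input, output;
        PropagateLayout(input, output, ArmnnDataLayout::NHWC);
        assert(input.layout == AclDataLayout::NHWC && output.layout == AclDataLayout::NHWC);
        return 0;
    }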
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 4bddd9a24c..17c39bc8ad 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -18,7 +18,7 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
arm_compute::NormalizationLayerInfo normalizationInfo =
- CreateAclNormalizationLayerInfoForL2Normalization(input);
+ CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}
@@ -32,7 +32,14 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input, &output, CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0]));
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ m_Layer.configure(&input, &output,
+ CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
+ m_Data.m_Parameters.m_DataLayout));
}
void NeonL2NormalizationFloatWorkload::Execute() const
diff --git a/src/backends/test/ArmComputeCl.cpp b/src/backends/test/ArmComputeCl.cpp
index a106c789ae..af30ff0c29 100644
--- a/src/backends/test/ArmComputeCl.cpp
+++ b/src/backends/test/ArmComputeCl.cpp
@@ -174,11 +174,17 @@ ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadca
// Batch Norm
ARMNN_AUTO_TEST_CASE(BatchNorm, BatchNormTest)
+// L2 Normalization
ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
+
// Resize Bilinear
ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
diff --git a/src/backends/test/ArmComputeNeon.cpp b/src/backends/test/ArmComputeNeon.cpp
index 66cce250cd..214f8d8f05 100644
--- a/src/backends/test/ArmComputeNeon.cpp
+++ b/src/backends/test/ArmComputeNeon.cpp
@@ -368,10 +368,15 @@ ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDims, Concatenation3dDim2DiffIn
ARMNN_AUTO_TEST_CASE(Concatenation3dDim2DiffInputDimsUint8, Concatenation3dDim2DiffInputDimsUint8Test)
// L2 Normalization
-ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest);
-ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest);
-ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest);
-ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest);
+ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest)
+
+ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
+ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest)
diff --git a/src/backends/test/BatchNormTestImpl.hpp b/src/backends/test/BatchNormTestImpl.hpp
index d551221ae1..ab5413d277 100644
--- a/src/backends/test/BatchNormTestImpl.hpp
+++ b/src/backends/test/BatchNormTestImpl.hpp
@@ -17,8 +17,8 @@
template<typename T>
LayerTestResult<T,4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
- float qScale,
- int32_t qOffset)
+ float qScale,
+ int32_t qOffset)
{
const unsigned int width = 2;
const unsigned int height = 3;
@@ -103,9 +103,10 @@ LayerTestResult<T,4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ workloadFactory.Finalize();
workload->Execute();
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
return ret;
-}
\ No newline at end of file
+}
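
The Finalize() call added above brings BatchNormTestImpl in line with the flow
the other layer tests use: allocate the handles, upload the input, finalize
the factory, and only then execute. Distilled from L2NormalizationTestImpl
below (inputData and outputData are placeholders for the test's own buffers):

    // Canonical per-test flow, using the calls that appear in this patch:
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::IWorkload>     workload     = workloadFactory.CreateL2Normalization(descriptor, info);

    inputHandle->Allocate();                                    // reserve backing memory
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), inputData);      // upload inputs
    workloadFactory.Finalize();                                 // backend finishes preparing
    workload->Execute();                                        // run the layer
    CopyDataFromITensorHandle(outputData, outputHandle.get());  // read back results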
diff --git a/src/backends/test/ClContextControlFixture.hpp b/src/backends/test/ClContextControlFixture.hpp
index c81428ff82..fd53e3fcf3 100644
--- a/src/backends/test/ClContextControlFixture.hpp
+++ b/src/backends/test/ClContextControlFixture.hpp
@@ -10,9 +10,22 @@
template<bool ProfilingEnabled>
struct ClContextControlFixtureBase
{
+ static ClContextControlFixtureBase*& Instance()
+ {
+ static ClContextControlFixtureBase* s_Instance = nullptr;
+ return s_Instance;
+ }
+
// Initialising ClContextControl to ensure OpenCL is loaded correctly for each test case
- ClContextControlFixtureBase() : m_ClContextControl(nullptr, ProfilingEnabled) {}
- ~ClContextControlFixtureBase() {}
+ ClContextControlFixtureBase()
+ : m_ClContextControl(nullptr, ProfilingEnabled)
+ {
+ Instance() = this;
+ }
+ ~ClContextControlFixtureBase()
+ {
+ Instance() = nullptr;
+ }
armnn::ClContextControl m_ClContextControl;
};
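
The new Instance() accessor exposes the active fixture through a function-local
static, so test code that never receives the fixture object can still reach its
ClContextControl. LayerTests.cpp below uses it to clear the CL cache before the
NHWC 1d test; the access pattern is (assuming ClContextControlFixture is an
alias for a concrete ClContextControlFixtureBase instantiation):

    #ifdef ARMCOMPUTECL_ENABLED
    if (ClContextControlFixture::Instance())   // the fixture may not be active
    {
        ClContextControlFixture::Instance()->m_ClContextControl.ClearClCache();
    }
    #endif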
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 267a8d6bad..78d4d62089 100644
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -39,6 +39,8 @@
#include "ConvertFp16ToFp32TestImpl.hpp"
#include "ConvertFp32ToFp16TestImpl.hpp"
+#include "ClContextControlFixture.hpp"
+
// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
static std::vector<float> ConvInput3x8x16({
0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
@@ -3159,47 +3161,30 @@ LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workload
return ret;
}
-LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
+namespace
{
- constexpr unsigned int inputWidth = 1;
- constexpr unsigned int inputHeight = 1;
- constexpr unsigned int inputChannels = 10;
- constexpr unsigned int inputBatchSize = 1;
-
- constexpr unsigned int outputWidth = inputWidth;
- constexpr unsigned int outputHeight = inputHeight;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::TensorShape& inputOutputTensorShape,
+ const std::vector<float>& inputValues,
+ const std::vector<float>& expectedOutputValues,
+ armnn::DataLayout dataLayout)
+{
+ const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
- }));
+ auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
- const float approxInvL2Norm = 0.050964719f;
LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f * approxInvL2Norm,
- 2.0f * approxInvL2Norm,
- 3.0f * approxInvL2Norm,
- 4.0f * approxInvL2Norm,
- 5.0f * approxInvL2Norm,
- 6.0f * approxInvL2Norm,
- 7.0f * approxInvL2Norm,
- 8.0f * approxInvL2Norm,
- 9.0f * approxInvL2Norm,
- 10.0f * approxInvL2Norm
- }));
+ result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
armnn::L2NormalizationQueueDescriptor descriptor;
+ descriptor.m_Parameters.m_DataLayout = dataLayout;
armnn::WorkloadInfo info;
+
AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3207,18 +3192,17 @@ LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloa
inputHandle->Allocate();
outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
workloadFactory.Finalize();
workload->Execute();
CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
return result;
}
-namespace
-{
-
float CalcInvL2Norm(std::initializer_list<float> elements)
{
const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
@@ -3226,99 +3210,207 @@ float CalcInvL2Norm(std::initializer_list<float> elements)
return 1.0f / sqrtf(reduction);
}
-}
+} // anonymous namespace
-LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 5;
- constexpr unsigned int inputHeight = 1;
- constexpr unsigned int inputChannels = 2;
- constexpr unsigned int inputBatchSize = 1;
+ // Width: 1
+ // Height: 1
+ // Channels: 10
+ // BatchSize: 1
- constexpr unsigned int outputWidth = inputWidth;
- constexpr unsigned int outputHeight = inputHeight;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
+ const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Channel 0, Height (1) x Width (1)
+ 1.0f,
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ // Batch 0, Channel 1, Height (1) x Width (1)
+ 2.0f,
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
- 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
- }));
+ // Batch 0, Channel 2, Height (1) x Width (1)
+ 3.0f,
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
- 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
- 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
- 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
- 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
+ // Batch 0, Channel 3, Height (1) x Width (1)
+ 4.0f,
- 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
- 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
- 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
- 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
- 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
- }));
+ // Batch 0, Channel 4, Height (1) x Width (1)
+ 5.0f,
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ // Batch 0, Channel 5, Height (1) x Width (1)
+ 6.0f,
- armnn::L2NormalizationQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+ // Batch 0, Channel 6, Height (1) x Width (1)
+ 7.0f,
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+ // Batch 0, Channel 7, Height (1) x Width (1)
+ 8.0f,
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ // Batch 0, Channel 8, Height (1) x Width (1)
+ 9.0f,
- workloadFactory.Finalize();
- workload->Execute();
+ // Batch 0, Channel 9, Height (1) x Width (1)
+ 10.0f
+ };
+ const float approxInvL2Norm = 0.050964719f;
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Channel 0, Height (1) x Width (1)
+ 1.0f * approxInvL2Norm,
+ 2.0f * approxInvL2Norm,
+ 3.0f * approxInvL2Norm,
+ 4.0f * approxInvL2Norm,
+ 5.0f * approxInvL2Norm,
+ 6.0f * approxInvL2Norm,
+ 7.0f * approxInvL2Norm,
+ 8.0f * approxInvL2Norm,
+ 9.0f * approxInvL2Norm,
+ 10.0f * approxInvL2Norm
+ };
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
}
-LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 3;
- constexpr unsigned int inputHeight = 4;
- constexpr unsigned int inputChannels = 2;
- constexpr unsigned int inputBatchSize = 1;
+#ifdef ARMCOMPUTECL_ENABLED
+ // Clear the CL cache before this test when using ACL
+ if (ClContextControlFixture::Instance())
+ {
+ ClContextControlFixture::Instance()->m_ClContextControl.ClearClCache();
+ }
+#endif
- constexpr unsigned int outputWidth = inputWidth;
- constexpr unsigned int outputHeight = inputHeight;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
+ // Width: 1
+ // Height: 1
+ // Channels: 10
+ // BatchSize: 1
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width (1) x Channel (10)
+ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
+ };
+ const float approxInvL2Norm = 0.050964719f;
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width (1) x Channel (10)
+ 1.0f * approxInvL2Norm,
+ 2.0f * approxInvL2Norm,
+ 3.0f * approxInvL2Norm,
+ 4.0f * approxInvL2Norm,
+ 5.0f * approxInvL2Norm,
+ 6.0f * approxInvL2Norm,
+ 7.0f * approxInvL2Norm,
+ 8.0f * approxInvL2Norm,
+ 9.0f * approxInvL2Norm,
+ 10.0f * approxInvL2Norm
+ };
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- // Channel 0
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // Width: 5
+ // Height: 1
+ // Channels: 2
+ // BatchSize: 1
+
+ const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Channel 0, Height (1) x Width (5)
+ 1.0f, 3.0f, 5.0f, 7.0f, 9.0f,
+
+ // Batch 0, Channel 1, Height (1) x Width (5)
+ 2.0f, 4.0f, 6.0f, 8.0f, 10.0f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Channel 0, Height (1) x Width (5)
+ 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
+ 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
+ 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
+ 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
+ 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
+
+ // Batch 0, Channel 1, Height (1) x Width (5)
+ 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
+ 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
+ 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
+ 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
+ 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
+ };
+
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // Width: 5
+ // Height: 1
+ // Channels: 2
+ // BatchSize: 1
+
+ const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width (5) x Channel (2)
+ 1.0f, 2.0f,
+ 3.0f, 4.0f,
+ 5.0f, 6.0f,
+ 7.0f, 8.0f,
+ 9.0f, 10.0f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width (5) x Channel (2)
+ 1.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
+ 2.0f * CalcInvL2Norm({ 1.0f, 2.0f }),
+ 3.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
+ 4.0f * CalcInvL2Norm({ 3.0f, 4.0f }),
+ 5.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
+ 6.0f * CalcInvL2Norm({ 5.0f, 6.0f }),
+ 7.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
+ 8.0f * CalcInvL2Norm({ 7.0f, 8.0f }),
+ 9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
+ 10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
+ };
+
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // Width: 3
+ // Height: 4
+ // Channels: 2
+ // BatchSize: 1
+
+ const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Channel 0, Height (4) x Width (3)
119.0f, 21.0f, 150.0f,
149.0f, 32.0f, 179.0f,
15.0f, 227.0f, 141.0f,
147.0f, 199.0f, 220.0f,
- // Channel 1
+ // Batch 0, Channel 1, Height (4) x Width (3)
110.0f, 140.0f, 73.0f,
211.0f, 212.0f, 89.0f,
24.0f, 138.0f, 188.0f,
- 162.0f, 12.0f, 161.0f,
- }));
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+ 162.0f, 12.0f, 161.0f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Channel 0, Height (4) x Width (3)
119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
@@ -3332,6 +3424,7 @@ LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloa
199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
+ // Batch 0, Channel 1, Height (4) x Width (3)
110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
@@ -3343,89 +3436,131 @@ LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloa
188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
- 161.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
- }));
+ 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
+ };
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
- armnn::L2NormalizationQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // Width: 3
+ // Height: 4
+ // Channels: 2
+ // BatchSize: 1
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+ const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width (3) x Channel (2)
+ 119.0f, 110.0f,
+ 21.0f, 140.0f,
+ 150.0f, 73.0f,
+
+ // Batch 0, Height 1, Width (3) x Channel (2)
+ 149.0f, 211.0f,
+ 32.0f, 212.0f,
+ 179.0f, 89.0f,
+
+ // Batch 0, Height 2, Width (3) x Channel (2)
+ 15.0f, 24.0f,
+ 227.0f, 138.0f,
+ 141.0f, 188.0f,
+
+ // Batch 0, Height 3, Width (3) x Channel (2)
+ 147.0f, 162.0f,
+ 199.0f, 12.0f,
+ 220.0f, 161.0f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width (3) x Channel (2)
+ 119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+ 110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+ 21.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
+ 140.0f * CalcInvL2Norm({ 21.0f, 140.0f }),
+ 150.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
+ 73.0f * CalcInvL2Norm({ 150.0f, 73.0f }),
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ // Batch 0, Height 1, Width (3) x Channel (2)
+ 149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+ 211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+ 32.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
+ 212.0f * CalcInvL2Norm({ 32.0f, 212.0f }),
+ 179.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
+ 89.0f * CalcInvL2Norm({ 179.0f, 89.0f }),
- workloadFactory.Finalize();
- workload->Execute();
+ // Batch 0, Height 2, Width (3) x Channel (2)
+ 15.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
+ 24.0f * CalcInvL2Norm({ 15.0f, 24.0f }),
+ 227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+ 138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+ 141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+ 188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
+ // Batch 0, Height 3, Width (3) x Channel (2)
+ 147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+ 162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+ 199.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
+ 12.0f * CalcInvL2Norm({ 199.0f, 12.0f }),
+ 220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
+ 161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
+ };
+
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}
LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
{
- constexpr unsigned int inputWidth = 3;
- constexpr unsigned int inputHeight = 4;
- constexpr unsigned int inputChannels = 3;
- constexpr unsigned int inputBatchSize = 2;
-
- constexpr unsigned int outputWidth = inputWidth;
- constexpr unsigned int outputHeight = inputHeight;
- constexpr unsigned int outputChannels = inputChannels;
- constexpr unsigned int outputBatchSize = inputBatchSize;
-
- const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::DataType::Float32);
- const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::DataType::Float32);
+ // Width: 3
+ // Height: 4
+ // Channels: 3
+ // BatchSize: 2
- auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
- // Batch 0, Channel 0
+ const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Channel 0, Height (4) x Width (3)
235.0f, 46.0f, 178.0f,
100.0f, 123.0f, 19.0f,
172.0f, 74.0f, 250.0f,
6.0f, 195.0f, 80.0f,
- // Batch 0, Channel 1
+ // Batch 0, Channel 1, Height (4) x Width (3)
113.0f, 95.0f, 202.0f,
77.0f, 114.0f, 71.0f,
122.0f, 246.0f, 166.0f,
82.0f, 28.0f, 37.0f,
- // Batch 0, Channel 2
+ // Batch 0, Channel 2, Height (4) x Width (3)
56.0f, 170.0f, 162.0f,
194.0f, 89.0f, 254.0f,
12.0f, 209.0f, 200.0f,
1.0f, 64.0f, 54.0f,
- // Batch 1, Channel 0
+ // Batch 1, Channel 0, Height (4) x Width (3)
67.0f, 90.0f, 49.0f,
7.0f, 163.0f, 18.0f,
25.0f, 117.0f, 103.0f,
247.0f, 59.0f, 189.0f,
- // Batch 1, Channel 1
+ // Batch 1, Channel 1, Height (4) x Width (3)
239.0f, 104.0f, 199.0f,
17.0f, 124.0f, 153.0f,
222.0f, 217.0f, 75.0f,
32.0f, 126.0f, 21.0f,
- // Batch 1, Channel 2
+ // Batch 1, Channel 2, Height (4) x Width (3)
97.0f, 145.0f, 215.0f,
115.0f, 116.0f, 238.0f,
226.0f, 16.0f, 132.0f,
- 92.0f, 125.0f, 88.0f,
- }));
-
- LayerTestResult<float, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-
- // Batch 0, Channel 0
+ 92.0f, 125.0f, 88.0f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Channel 0, Height (4) x Width (3)
235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
@@ -3439,7 +3574,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloa
195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
- // Batch 0, Channel 1
+ // Batch 0, Channel 1, Height (4) x Width (3)
113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
@@ -3453,7 +3588,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloa
28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
- // Batch 0, Channel 2
+ // Batch 0, Channel 2, Height (4) x Width (3)
56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
@@ -3467,7 +3602,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloa
64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
- // Batch 1, Channel 0
+ // Batch 1, Channel 0, Height (4) x Width (3)
67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
@@ -3481,7 +3616,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloa
59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
- // Batch 1, Channel 1
+ // Batch 1, Channel 1, Height (4) x Width (3)
239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
@@ -3495,7 +3630,7 @@ LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloa
126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
- // Batch 1, Channel 2
+ // Batch 1, Channel 2, Height (4) x Width (3)
97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
@@ -3507,28 +3642,156 @@ LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloa
132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
- 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
- }));
+ 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
+ };
- std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
- std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
- armnn::L2NormalizationQueueDescriptor descriptor;
- armnn::WorkloadInfo info;
- AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
- AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+ // Width: 3
+ // Height: 4
+ // Channels: 3
+ // BatchSize: 2
- std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+ const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
+ std::vector<float> inputValues
+ {
+ // Batch 0, Height 0, Width (3) x Channel (3)
+ 235.0f, 113.0f, 56.0f,
+ 46.0f, 95.0f, 170.0f,
+ 178.0f, 202.0f, 162.0f,
+
+ // Batch 0, Height 1, Width (3) x Channel (3)
+ 100.0f, 77.0f, 194.0f,
+ 123.0f, 114.0f, 89.0f,
+ 19.0f, 71.0f, 254.0f,
+
+ // Batch 0, Height 2, Width (3) x Channel (3)
+ 172.0f, 122.0f, 12.0f,
+ 74.0f, 246.0f, 209.0f,
+ 250.0f, 166.0f, 200.0f,
+
+ // Batch 0, Height 3, Width (3) x Channel (3)
+ 6.0f, 82.0f, 1.0f,
+ 195.0f, 28.0f, 64.0f,
+ 80.0f, 37.0f, 54.0f,
+
+ // Batch 1, Height 0, Width (3) x Channel (3)
+ 67.0f, 239.0f, 97.0f,
+ 90.0f, 104.0f, 145.0f,
+ 49.0f, 199.0f, 215.0f,
+
+ // Batch 1, Height 1, Width (3) x Channel (3)
+ 7.0f, 17.0f, 115.0f,
+ 163.0f, 124.0f, 116.0f,
+ 18.0f, 153.0f, 238.0f,
+
+ // Batch 1, Height 2, Width (3) x Channel (3)
+ 25.0f, 222.0f, 226.0f,
+ 117.0f, 217.0f, 16.0f,
+ 103.0f, 75.0f, 132.0f,
+
+ // Batch 1, Height 3, Width (3) x Channel (3)
+ 247.0f, 32.0f, 92.0f,
+ 59.0f, 126.0f, 125.0f,
+ 189.0f, 21.0f, 88.0f
+ };
+ std::vector<float> expectedOutputValues
+ {
+ // Batch 0, Height 0, Width (3) x Channel (3)
+ 235.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
+ 113.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
+ 56.0f * CalcInvL2Norm({ 235.0f, 113.0f, 56.0f }),
+ 46.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
+ 95.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
+ 170.0f * CalcInvL2Norm({ 46.0f, 95.0f, 170.0f }),
+ 178.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
+ 202.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
+ 162.0f * CalcInvL2Norm({ 178.0f, 202.0F, 162.0f }),
- inputHandle->Allocate();
- outputHandle->Allocate();
- CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+ // Batch 0, Height 1, Width (3) x Channel (3)
+ 100.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
+ 77.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
+ 194.0f * CalcInvL2Norm({ 100.0f, 77.0f, 194.0f }),
+ 123.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
+ 114.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
+ 89.0f * CalcInvL2Norm({ 123.0f, 114.0f, 89.0f }),
+ 19.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
+ 71.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
+ 254.0f * CalcInvL2Norm({ 19.0f, 71.0f, 254.0f }),
- workloadFactory.Finalize();
- workload->Execute();
+ // Batch 0, Height 2, Width (3) x Channel (3)
+ 172.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
+ 122.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
+ 12.0f * CalcInvL2Norm({ 172.0f, 122.0f, 12.0f }),
+ 74.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
+ 246.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
+ 209.0f * CalcInvL2Norm({ 74.0f, 246.0f, 209.0f }),
+ 250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+ 166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+ 200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
- CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
- return result;
+ // Batch 0, Height 3, Width (3) x Channel (3)
+ 6.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
+ 82.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
+ 1.0f * CalcInvL2Norm({ 6.0f, 82.0f, 1.0f }),
+ 195.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
+ 28.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
+ 64.0f * CalcInvL2Norm({ 195.0f, 28.0f, 64.0f }),
+ 80.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
+ 37.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
+ 54.0f * CalcInvL2Norm({ 80.0f, 37.0f, 54.0f }),
+
+ // Batch 1, Height 0, Width (3) x Channel (3)
+ 67.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
+ 239.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
+ 97.0f * CalcInvL2Norm({ 67.0f, 239.0f, 97.0f }),
+ 90.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
+ 104.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
+ 145.0f * CalcInvL2Norm({ 90.0f, 104.0f, 145.0f }),
+ 49.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
+ 199.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
+ 215.0f * CalcInvL2Norm({ 49.0f, 199.0f, 215.0f }),
+
+ // Batch 1, Height 1, Width (3) x Channel (3)
+ 7.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
+ 17.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
+ 115.0f * CalcInvL2Norm({ 7.0f, 17.0f, 115.0f }),
+ 163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+ 124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+ 116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+ 18.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
+ 153.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
+ 238.0f * CalcInvL2Norm({ 18.0f, 153.0f, 238.0f }),
+
+ // Batch 1, Height 2, Width (3) x Channel (3)
+ 25.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
+ 222.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
+ 226.0f * CalcInvL2Norm({ 25.0f, 222.0f, 226.0f }),
+ 117.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
+ 217.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
+ 16.0f * CalcInvL2Norm({ 117.0f, 217.0f, 16.0f }),
+ 103.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
+ 75.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
+ 132.0f * CalcInvL2Norm({ 103.0f, 75.0f, 132.0f }),
+
+ // Batch 1, Height 3, Width (3) x Channel (3)
+ 247.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
+ 32.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
+ 92.0f * CalcInvL2Norm({ 247.0f, 32.0f, 92.0f }),
+ 59.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
+ 126.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
+ 125.0f * CalcInvL2Norm({ 59.0f, 126.0f, 125.0f }),
+ 189.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
+ 21.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f }),
+ 88.0f * CalcInvL2Norm({ 189.0f, 21.0f, 88.0f })
+ };
+
+ return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+ inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
}
template <typename T>
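
A quick worked check of the expected values: the tests multiply each input by
CalcInvL2Norm over the channel values at the same position. For the channel
pair { 1.0f, 2.0f } the inverse norm is 1/sqrt(1 + 4), roughly 0.447214, giving
the normalized pair { 0.447214, 0.894427 }; likewise the 1d tests'
approxInvL2Norm of 0.050964719 is 1/sqrt(1^2 + 2^2 + ... + 10^2) = 1/sqrt(385).
A standalone sketch of the helper (the lambda body is reconstructed, since the
hunk above elides it):

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>
    #include <numeric>

    // Same reduction as the anonymous-namespace helper in LayerTests.cpp.
    float CalcInvL2Norm(std::initializer_list<float> elements)
    {
        const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
            [](float acc, float element) { return acc + element * element; });
        return 1.0f / sqrtf(reduction);
    }

    int main()
    {
        const float inv = CalcInvL2Norm({ 1.0f, 2.0f });  // 1/sqrt(5)
        std::printf("%f %f\n", 1.0f * inv, 2.0f * inv);   // ~0.447214 0.894427
        return 0;
    }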
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index 3e5bb3d31d..e4ebaff527 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -249,6 +249,11 @@ LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloa
LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+
LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory);
LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory);
diff --git a/src/backends/test/Reference.cpp b/src/backends/test/Reference.cpp
index 30a8f8e1a5..05ebf2e8b0 100644
--- a/src/backends/test/Reference.cpp
+++ b/src/backends/test/Reference.cpp
@@ -193,12 +193,18 @@ ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
// Fake Quantization
ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
-// L2 Noramlization
+// L2 Normalization
ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest)
ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest)
+// NOTE: These tests are disabled until NHWC is supported by the reference L2Normalization implementation.
+//ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dNhwcTest);
+//ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dNhwcTest);
+//ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest);
+//ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest);
+
// Constant
ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantUint8Test)