From 81f2232a7e1145f80aaa2e382bb02c7653a058aa Mon Sep 17 00:00:00 2001
From: FrancisMurtagh
Date: Fri, 16 Nov 2018 12:26:20 +0000
Subject: IVGCVSW-2017: CLWorkload to use L2Normalization

* Changed ClL2Normalisation from using CLNormalizationLayer to use
  CLL2NormalizeLayer to normalise along the channel axis in either
  NCHW or NHWC format.

Change-Id: I399cbee408a277d1ef8c6c85ebcbd86d6c3e407b
---
 .../neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

(limited to 'src/backends/neon/workloads')

diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 754155d182..ca3b36e16f 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,11 +14,12 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
+        output, descriptor.m_DataLayout.GetDataLayout());
 
     arm_compute::NormalizationLayerInfo normalizationInfo =
-        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
+        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());
 
     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -33,13 +34,14 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-    m_Layer.configure(&input, &output,
-                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
-                                                                        m_Data.m_Parameters.m_DataLayout));
+    m_Layer.configure(&input,
+                      &output,
+                      CreateAclNormalizationLayerInfoForL2Normalization(
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
 }
 
 void NeonL2NormalizationFloatWorkload::Execute() const
--
cgit v1.2.1
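
The GetDataLayout() calls added above suggest that, at this point in the tree, the descriptor's m_DataLayout member is a wrapper object (presumably something like ArmNN's DataLayoutIndexed) rather than a bare DataLayout enum, so the raw layout has to be unwrapped before it is handed to the Compute Library. As context for what "normalise along the channel axis in either NCHW or NHWC format" means, here is a minimal standalone sketch; it is illustrative C++ only, not ArmNN or Compute Library code, and the names DataLayout, l2NormalizeChannels and the epsilon guard are hypothetical.

    // Illustrative only: standalone per-pixel L2 normalisation along the channel
    // axis of a 4D tensor stored in NCHW or NHWC order. Names are hypothetical
    // and not taken from ArmNN or the Compute Library.
    #include <cmath>
    #include <cstddef>
    #include <vector>

    enum class DataLayout { NCHW, NHWC };

    // Scale every (batch, y, x) position so its channel vector has unit L2 norm.
    void l2NormalizeChannels(std::vector<float>& data,
                             std::size_t n, std::size_t c, std::size_t h, std::size_t w,
                             DataLayout layout)
    {
        // Distance in the flat buffer between consecutive channels at a fixed pixel.
        const std::size_t channelStride = (layout == DataLayout::NCHW) ? h * w : 1u;
        const float epsilon = 1e-12f; // illustrative guard against division by zero

        for (std::size_t batch = 0; batch < n; ++batch)
        {
            for (std::size_t y = 0; y < h; ++y)
            {
                for (std::size_t x = 0; x < w; ++x)
                {
                    // Flat index of channel 0 at this (batch, y, x) position.
                    const std::size_t base = (layout == DataLayout::NCHW)
                        ? (batch * c * h + y) * w + x    // [N][C][H][W]
                        : ((batch * h + y) * w + x) * c; // [N][H][W][C]

                    float sumSquares = 0.0f;
                    for (std::size_t ch = 0; ch < c; ++ch)
                    {
                        const float v = data[base + ch * channelStride];
                        sumSquares += v * v;
                    }

                    const float invNorm = 1.0f / std::sqrt(sumSquares + epsilon);
                    for (std::size_t ch = 0; ch < c; ++ch)
                    {
                        data[base + ch * channelStride] *= invNorm;
                    }
                }
            }
        }
    }

For an NHWC tensor of shape [1, 16, 16, 8], for example, each of the 256 (y, x) positions would have its 8-element channel vector scaled to unit L2 length. The workloads themselves delegate this computation to the Compute Library (per the commit message, CLL2NormalizeLayer on the CL backend) rather than looping like this.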