diff options
author | FrancisMurtagh <francis.murtagh@arm.com> | 2018-11-16 12:26:20 +0000 |
---|---|---|
committer | Aron Virginas-Tar <aron.virginas-tar@arm.com> | 2018-11-16 12:52:06 +0000 |
commit | 81f2232a7e1145f80aaa2e382bb02c7653a058aa (patch) | |
tree | 2d546696c7876338bbb7797b4fdd03b0b6a7c360 /src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | |
parent | 48a4ae828a4692132a755ae02b517e2ceb189673 (diff) | |
download | armnn-81f2232a7e1145f80aaa2e382bb02c7653a058aa.tar.gz |
IVGCVSW-2017: CLWorkload to use L2Normalization
* Changed ClL2Normalisation from using CLNormalizationLayer
to use CLL2NormalizeLayer to normalise along the channel axis
in either NCHW or NHWC format.
Change-Id: I399cbee408a277d1ef8c6c85ebcbd86d6c3e407b
Diffstat (limited to 'src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 16 |
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 754155d182..ca3b36e16f 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,11 +14,12 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
+            output, descriptor.m_DataLayout.GetDataLayout());

     arm_compute::NormalizationLayerInfo normalizationInfo =
-        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
+        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());

     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -33,13 +34,14 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
     arm_compute::ITensor& input  = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);

-    m_Layer.configure(&input, &output,
-                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
-                                                                        m_Data.m_Parameters.m_DataLayout));
+    m_Layer.configure(&input,
+                      &output,
+                      CreateAclNormalizationLayerInfoForL2Normalization(
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
 }

 void NeonL2NormalizationFloatWorkload::Execute() const