author    FrancisMurtagh <francis.murtagh@arm.com>        2018-11-16 12:26:20 +0000
committer Aron Virginas-Tar <aron.virginas-tar@arm.com>   2018-11-16 12:52:06 +0000
commit    81f2232a7e1145f80aaa2e382bb02c7653a058aa (patch)
tree      2d546696c7876338bbb7797b4fdd03b0b6a7c360
parent    48a4ae828a4692132a755ae02b517e2ceb189673 (diff)
download  armnn-81f2232a7e1145f80aaa2e382bb02c7653a058aa.tar.gz
IVGCVSW-2017: CLWorkload to use L2Normalization
* Changed ClL2Normalisation from using CLNormalizationLayer to CLL2NormalizeLayer,
  so that it normalises along the channel axis in either NCHW or NHWC format.

Change-Id: I399cbee408a277d1ef8c6c85ebcbd86d6c3e407b
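Note (illustrative, not part of the patch): CLL2NormalizeLayer takes the index of the
dimension to normalise over, and the patch picks 2 for NCHW and 0 for NHWC. Arm Compute
Library orders tensor dimensions with index 0 as the fastest-changing one, so an armnn
NCHW shape (N,C,H,W) is seen by ACL as (W,H,C,N), putting the channel axis at index 2,
while NHWC (N,H,W,C) is seen as (C,W,H,N), putting it at index 0. The standalone C++
sketch below (assumed helper name, not armnn code) restates that mapping:

#include <cassert>

// Returns the ACL dimension index of the channel axis for a given armnn data
// layout, mirroring the "(layout == NCHW) ? 2 : 0" selection added to the CL
// workload in the diff below.
enum class DataLayout { NCHW, NHWC };

unsigned int ChannelAxisForAcl(DataLayout layout)
{
    // armnn NCHW (N,C,H,W) -> ACL (W,H,C,N): channel at ACL index 2
    // armnn NHWC (N,H,W,C) -> ACL (C,W,H,N): channel at ACL index 0
    return (layout == DataLayout::NCHW) ? 2u : 0u;
}

int main()
{
    assert(ChannelAxisForAcl(DataLayout::NCHW) == 2u);
    assert(ChannelAxisForAcl(DataLayout::NHWC) == 0u);
    return 0;
}

That axis value is what the diff below passes to CLL2NormalizeLayer::validate and
CLL2NormalizeLayer::configure in ClL2NormalizationFloatWorkload.cpp.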
-rw-r--r--  include/armnn/Descriptors.hpp                                       2
-rw-r--r--  src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp       19
-rw-r--r--  src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp        2
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp   16
4 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index f8dd18ab8a..2781786f10 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -282,7 +282,7 @@ struct L2NormalizationDescriptor
     : m_DataLayout(DataLayout::NCHW)
     {}

-    DataLayout m_DataLayout;
+    DataLayoutIndexed m_DataLayout;
 };

 struct BatchNormalizationDescriptor
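Note (simplified sketch, not the armnn implementation): the descriptor member changes
from the raw DataLayout enum to a DataLayoutIndexed wrapper, which is why the workloads
below now call descriptor.m_DataLayout.GetDataLayout(). A rough stand-in for such a
wrapper, with assumed member functions, could look like this:

// Simplified stand-in for a DataLayoutIndexed-style wrapper (assumed shape,
// not the real armnn class): stores the layout enum and reports which armnn
// (N-first) dimension holds channels, height and width.
enum class DataLayout { NCHW, NHWC };

class DataLayoutIndexed
{
public:
    DataLayoutIndexed(DataLayout layout) : m_DataLayout(layout) {} // non-explicit: a plain DataLayout still converts

    DataLayout   GetDataLayout()    const { return m_DataLayout; }
    unsigned int GetChannelsIndex() const { return m_DataLayout == DataLayout::NCHW ? 1u : 3u; }
    unsigned int GetHeightIndex()   const { return m_DataLayout == DataLayout::NCHW ? 2u : 1u; }
    unsigned int GetWidthIndex()    const { return m_DataLayout == DataLayout::NCHW ? 3u : 2u; }

private:
    DataLayout m_DataLayout;
};

A non-explicit converting constructor like the one above is what lets the descriptor's
initialiser ": m_DataLayout(DataLayout::NCHW)" keep compiling unchanged after the type switch.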
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
index f827137527..74e40ecbf6 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.cpp
@@ -18,13 +18,14 @@ arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input,
+                                                                       descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output,
+                                                                        descriptor.m_DataLayout.GetDataLayout());

-    arm_compute::NormalizationLayerInfo normalizationInfo =
-        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
+    unsigned int axis = (descriptor.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;

-    return arm_compute::CLNormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
+    return arm_compute::CLL2NormalizeLayer::validate(&aclInput, &aclOutput, axis);
 }

 ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
@@ -36,13 +37,13 @@ ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2Normaliza
     arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);

-    m_Layer.configure(&input, &output,
-                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
-                                                                        m_Data.m_Parameters.m_DataLayout));
+    unsigned int axis = (m_Data.m_Parameters.m_DataLayout.GetDataLayout() == DataLayout::NCHW) ? 2 : 0;
+
+    m_Layer.configure(&input, &output, axis);
 }

 void ClL2NormalizationFloatWorkload::Execute() const
diff --git a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
index a16a8f62c4..53bbfc82c9 100644
--- a/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
+++ b/src/backends/cl/workloads/ClL2NormalizationFloatWorkload.hpp
@@ -25,7 +25,7 @@ public:

 private:
     // Purposely not a CLL2Normalize function. See constructor.
-    mutable arm_compute::CLNormalizationLayer m_Layer;
+    mutable arm_compute::CLL2NormalizeLayer m_Layer;
 };

 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index 754155d182..ca3b36e16f 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,11 +14,12 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
+        output, descriptor.m_DataLayout.GetDataLayout());

     arm_compute::NormalizationLayerInfo normalizationInfo =
-        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
+        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());

     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -33,13 +34,14 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
     arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);

-    m_Layer.configure(&input, &output,
-                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0],
-                                                                        m_Data.m_Parameters.m_DataLayout));
+    m_Layer.configure(&input,
+                      &output,
+                      CreateAclNormalizationLayerInfoForL2Normalization(
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
 }

 void NeonL2NormalizationFloatWorkload::Execute() const