From 8800c00770ed14eb48045cfcf033d6b67595a126 Mon Sep 17 00:00:00 2001
From: Matthew Bentham
Date: Mon, 19 Nov 2018 13:19:28 +0000
Subject: IVGCVSW-2169 Remove DataLayoutIndexed from public API

Change-Id: If8d8087d9d365e467d3ca9bf9c40d7219cb75cfd
---
 .../workloads/NeonBatchNormalizationFloatWorkload.cpp   | 16 +++++++---------
 .../neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 11 +++++------
 src/backends/neon/workloads/NeonPooling2dWorkload.cpp   |  6 +++---
 3 files changed, 15 insertions(+), 18 deletions(-)

(limited to 'src/backends/neon')

diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
index e576c64752..a8181f66d9 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
@@ -21,20 +21,18 @@ arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
                                                    const TensorInfo& gamma,
                                                    const BatchNormalizationDescriptor& descriptor)
 {
-    const DataLayout dataLayout = descriptor.m_DataLayout.GetDataLayout();
-
     const arm_compute::TensorInfo aclInputInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(input, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(output, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclMeanInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(mean, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclVarInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(var, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclBetaInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(beta, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclGammaInfo =
-          armcomputetensorutils::BuildArmComputeTensorInfo(gamma, dataLayout);
+          armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout);
 
     return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
                                                             &aclOutputInfo,
@@ -54,7 +52,7 @@ NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index ca3b36e16f..df8caefbd2 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -14,12 +14,11 @@ arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                         const TensorInfo& output,
                                                         const L2NormalizationDescriptor& descriptor)
 {
-    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
-    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(
-        output, descriptor.m_DataLayout.GetDataLayout());
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::NormalizationLayerInfo normalizationInfo =
-        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout.GetDataLayout());
+        CreateAclNormalizationLayerInfoForL2Normalization(input, descriptor.m_DataLayout);
 
     return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
 }
@@ -34,14 +33,14 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
     m_Layer.configure(&input,
                       &output,
                       CreateAclNormalizationLayerInfoForL2Normalization(
-                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout.GetDataLayout()));
+                          info.m_InputTensorInfos[0], m_Data.m_Parameters.m_DataLayout));
 }
 
 void NeonL2NormalizationFloatWorkload::Execute() const
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index b8acf36028..9c8f71ad19 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -18,9 +18,9 @@ arm_compute::Status NeonPooling2dWorkloadValidate(const TensorInfo& input,
                                                   const Pooling2dDescriptor& descriptor)
 {
     const arm_compute::TensorInfo aclInputInfo =
-        BuildArmComputeTensorInfo(input, descriptor.m_DataLayout.GetDataLayout());
+        BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo =
-        BuildArmComputeTensorInfo(output, descriptor.m_DataLayout.GetDataLayout());
+        BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
 
     arm_compute::PoolingLayerInfo layerInfo = BuildArmComputePoolingLayerInfo(descriptor);
 
@@ -36,7 +36,7 @@ NeonPooling2dWorkload::NeonPooling2dWorkload(
     arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
     arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout.GetDataLayout());
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
 
-- 
cgit v1.2.1
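
Note: the patch applies the same mechanical substitution at every call site. With DataLayoutIndexed removed from the public API, a descriptor's m_DataLayout member is the plain armnn::DataLayout enum itself, so the .GetDataLayout() unwrapping disappears and the member is passed straight to helpers such as BuildArmComputeTensorInfo and ConvertDataLayout, which already took the plain enum. Below is a minimal, self-contained C++ sketch of that before/after pattern; the types are simplified stand-ins, not the real Arm NN or Compute Library declarations.

// Sketch only: simplified stand-ins illustrating the call-site change above;
// not the real Arm NN / Compute Library types.
enum class DataLayout { NCHW, NHWC };

// Old public API shape: descriptors carried a DataLayoutIndexed wrapper that
// had to be unwrapped with GetDataLayout() before use.
class DataLayoutIndexed
{
public:
    DataLayoutIndexed(DataLayout layout) : m_Layout(layout) {}
    DataLayout GetDataLayout() const { return m_Layout; }
private:
    DataLayout m_Layout;
};

struct OldDescriptor { DataLayoutIndexed m_DataLayout = DataLayout::NCHW; };

// New public API shape: descriptors expose the enum directly.
struct NewDescriptor { DataLayout m_DataLayout = DataLayout::NCHW; };

// Stand-in for helpers like BuildArmComputeTensorInfo / ConvertDataLayout,
// which accept the plain enum.
void UseLayout(DataLayout) {}

void BeforePatch(const OldDescriptor& d) { UseLayout(d.m_DataLayout.GetDataLayout()); }
void AfterPatch(const NewDescriptor& d)  { UseLayout(d.m_DataLayout); }

The workloads themselves are unaffected in behaviour: only the way the layout value reaches the Arm Compute Library calls changes.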