From c48ac8c8cea1748ebfef15144f070799d4a129c3 Mon Sep 17 00:00:00 2001
From: Matthew Bentham
Date: Wed, 12 Dec 2018 16:15:59 +0000
Subject: MLCE-80 Remove strong typing from NeonBatchNormalization

Technical debt work towards adding some new Neon workloads

Change-Id: I08ab6dd14d0e89d4ebc8a878fb69caa5681012bf
---
 .../workloads/NeonBatchNormalizationWorkload.cpp | 104 +++++++++++++++++++++
 1 file changed, 104 insertions(+)
 create mode 100644 src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp

diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
new file mode 100644
index 0000000000..44d5035431
--- /dev/null
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -0,0 +1,104 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonBatchNormalizationWorkload.hpp"
+#include
+#include
+#include
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+
+arm_compute::Status NeonBatchNormalizationValidate(const TensorInfo& input,
+                                                   const TensorInfo& output,
+                                                   const TensorInfo& mean,
+                                                   const TensorInfo& var,
+                                                   const TensorInfo& beta,
+                                                   const TensorInfo& gamma,
+                                                   const BatchNormalizationDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo =
+        armcomputetensorutils::BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclOutputInfo =
+        armcomputetensorutils::BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclMeanInfo =
+        armcomputetensorutils::BuildArmComputeTensorInfo(mean, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclVarInfo =
+        armcomputetensorutils::BuildArmComputeTensorInfo(var, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclBetaInfo =
+        armcomputetensorutils::BuildArmComputeTensorInfo(beta, descriptor.m_DataLayout);
+    const arm_compute::TensorInfo aclGammaInfo =
+        armcomputetensorutils::BuildArmComputeTensorInfo(gamma, descriptor.m_DataLayout);
+
+    return arm_compute::NEBatchNormalizationLayer::validate(&aclInputInfo,
+                                                            &aclOutputInfo,
+                                                            &aclMeanInfo,
+                                                            &aclVarInfo,
+                                                            &aclBetaInfo,
+                                                            &aclGammaInfo,
+                                                            descriptor.m_Eps);
+}
+
+NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
+    const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : BaseWorkload<BatchNormalizationQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1);
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_Mean = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo());
+
+    m_Variance = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo());
+
+    m_Gamma = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo());
+
+    m_Beta = std::make_unique<arm_compute::Tensor>();
+    BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo());
+
+    m_Layer.configure(&input,
+                      &output,
+                      m_Mean.get(),
+                      m_Variance.get(),
+                      m_Beta.get(),
+                      m_Gamma.get(),
+                      m_Data.m_Parameters.m_Eps);
+
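+    // InitializeArmComputeTensorData copies the constant mean, variance, gamma and beta
+    // data referenced by the queue descriptor into the ACL tensors built above.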
+    InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean);
+    InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance);
+    InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma);
+    InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta);
+
+    // Force Compute Library to perform the necessary copying and reshaping, after which
+    // the input parameter tensors are no longer needed and can be freed.
+    m_Layer.prepare();
+    FreeUnusedTensors();
+}
+
+void NeonBatchNormalizationWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonBatchNormalizationWorkload_Execute");
+    m_Layer.run();
+}
+
+void NeonBatchNormalizationWorkload::FreeUnusedTensors()
+{
+    FreeTensorIfUnused(m_Mean);
+    FreeTensorIfUnused(m_Variance);
+    FreeTensorIfUnused(m_Gamma);
+    FreeTensorIfUnused(m_Beta);
+}
+
+} //namespace armnn
-- 
cgit v1.2.1
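
As an aside from the patch itself, below is a minimal sketch of how a caller might use the new NeonBatchNormalizationValidate function to check support before building the workload. The tensor shapes, epsilon value, include paths and the CanRunBatchNormOnNeon helper are illustrative assumptions, not code taken from the ArmNN tree.

// Minimal usage sketch (not part of the patch): ask the Neon backend whether a
// batch normalization configuration is supported before creating any workload.
#include <arm_compute/core/Error.h>

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonBatchNormalizationWorkload.hpp" // assumed to declare NeonBatchNormalizationValidate

bool CanRunBatchNormOnNeon() // hypothetical helper, for illustration only
{
    using namespace armnn;

    // NCHW activations of shape [N=1, C=16, H=32, W=32]; the batch norm parameters
    // (mean, variance, beta, gamma) are 1-D tensors with one value per channel.
    TensorInfo inputInfo ({ 1, 16, 32, 32 }, DataType::Float32);
    TensorInfo outputInfo({ 1, 16, 32, 32 }, DataType::Float32);
    TensorInfo paramInfo ({ 16 }, DataType::Float32);

    BatchNormalizationDescriptor descriptor;
    descriptor.m_Eps        = 0.0001f;
    descriptor.m_DataLayout = DataLayout::NCHW;

    // The validate function converts the ArmNN TensorInfos to Compute Library TensorInfos
    // and forwards them to arm_compute::NEBatchNormalizationLayer::validate, so unsupported
    // configurations are reported without constructing the workload.
    arm_compute::Status status = NeonBatchNormalizationValidate(inputInfo,
                                                                outputInfo,
                                                                paramInfo, // mean
                                                                paramInfo, // variance
                                                                paramInfo, // beta
                                                                paramInfo, // gamma
                                                                descriptor);

    return status.error_code() == arm_compute::ErrorCode::OK;
}

A backend layer-support check would typically wrap a call like this, so that graph optimization can fall back to another backend when the Neon layer rejects the configuration.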