//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "NeonL2NormalizationFloat32Workload.hpp"
#include "backends/ArmComputeUtils.hpp"

#include <boost/cast.hpp>

namespace armnn
{

arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                        const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    // L2 normalization is expressed as an ACL NormalizationLayer, configured for
    // cross-channel (CROSS_MAP) normalization derived from the input tensor.
    arm_compute::NormalizationLayerInfo normalizationInfo =
        CreateAclNormalizationLayerInfoForL2Normalization(input);

    return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}

NeonL2NormalizationFloat32Workload::NeonL2NormalizationFloat32Workload(
    const L2NormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info,
    std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : FloatWorkload<L2NormalizationQueueDescriptor>(descriptor, info)
    , m_Layer(memoryManager)
{
    // This workload expects exactly one input tensor and one output tensor.
    m_Data.ValidateInputsOutputs("NeonL2NormalizationFloat32Workload", 1, 1);

    arm_compute::ITensor& input  =
        boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output =
        boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_Layer.configure(&input, &output,
                      CreateAclNormalizationLayerInfoForL2Normalization(info.m_InputTensorInfos[0]));
}

void NeonL2NormalizationFloat32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonL2NormalizationFloat32Workload_Execute");
    m_Layer.run();
}

} //namespace armnn
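
// A minimal usage sketch for the validate helper above, kept in a comment so the
// translation unit is unchanged; the tensor shape and data type here are assumed
// for illustration and are not taken from this file:
//
//   armnn::TensorInfo input(armnn::TensorShape({ 1, 4, 2, 2 }), armnn::DataType::Float32);
//   armnn::TensorInfo output(armnn::TensorShape({ 1, 4, 2, 2 }), armnn::DataType::Float32);
//   arm_compute::Status status = armnn::NeonL2NormalizationWorkloadValidate(input, output);
//   const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);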