diff options
author | David Beck <david.beck@arm.com> | 2018-09-19 12:03:20 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:56 +0100 |
commit | 10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab (patch) | |
tree | 1ac5b4f415531e2ef759439ab8e113f177bea7c5 /src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp | |
parent | a3f165624b2cdfbced674af5a6e11856b1e746d9 (diff) | |
download | armnn-10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab.tar.gz |
IVGCVSW-1897 : build infrastructure for the src/backends folder
Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
Diffstat (limited to 'src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp')
-rw-r--r-- | src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp | 70 |
1 file changed, 0 insertions, 70 deletions
diff --git a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp deleted file mode 100644 index 4534c376d8..0000000000 --- a/src/armnn/backends/NeonWorkloads/NeonNormalizationFloatWorkload.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "NeonNormalizationFloatWorkload.hpp" -#include "backends/NeonLayerSupport.hpp" -#include "backends/ArmComputeUtils.hpp" -#include "backends/ArmComputeTensorUtils.hpp" - -namespace armnn -{ - -arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input, - const TensorInfo& output, - const NormalizationDescriptor& descriptor) -{ - const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input); - const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output); - - arm_compute::NormalizationLayerInfo normalizationInfo = - armcomputetensorutils::BuildArmComputeNormalizationLayerInfo(descriptor); - - return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo); -} - -NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor, - const WorkloadInfo& info, - std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager) - : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info) - , m_NormalizationLayer(memoryManager) -{ - m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1); - std::string reasonIfUnsupported; - if (!IsNeonNormalizationDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters)) - { - throw UnimplementedException(reasonIfUnsupported); - } - - // Input and output tensors have to have the same dimensionality. 
- if (info.m_InputTensorInfos[0].GetShape()[1] != info.m_OutputTensorInfos[0].GetShape()[1] - || info.m_InputTensorInfos[0].GetShape()[0] != info.m_OutputTensorInfos[0].GetShape()[0] - || info.m_InputTensorInfos[0].GetShape()[3] != info.m_OutputTensorInfos[0].GetShape()[3] - || info.m_InputTensorInfos[0].GetShape()[2] != info.m_OutputTensorInfos[0].GetShape()[2]) - { - throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality."); - } - - arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); - arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); - - const arm_compute::NormType normType = - ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType); - arm_compute::NormalizationLayerInfo normalizationInfo(normType, - m_Data.m_Parameters.m_NormSize, - m_Data.m_Parameters.m_Alpha, - m_Data.m_Parameters.m_Beta, - m_Data.m_Parameters.m_K, - false); - - m_NormalizationLayer.configure(&input, &output, normalizationInfo); -} - -void NeonNormalizationFloatWorkload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloatWorkload_Execute"); - m_NormalizationLayer.run(); -} - -} //namespace armnn |