diff options
author | Aron Virginas-Tar <Aron.Virginas-Tar@arm.com> | 2019-09-09 13:36:45 +0100 |
---|---|---|
committer | Narumol Prangnawarat <narumol.prangnawarat@arm.com> | 2019-09-10 08:59:25 +0000 |
commit | 914e4db5a9083e922d89f133672fd44e92016e96 (patch) | |
tree | fd3d7d4a5c739eef02c756a668fa4a95c2255f36 /src/backends/neon/workloads/NeonAbsWorkload.cpp | |
parent | ab173e9b6978d5befb4884a803773967d52bcfef (diff) | |
download | armnn-914e4db5a9083e922d89f133672fd44e92016e96.tar.gz |
IVGCVSW-3822 Add NEON workload support for ABS
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I0c2ead004200e053d748ea39937f2f9ed35a636b
Diffstat (limited to 'src/backends/neon/workloads/NeonAbsWorkload.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonAbsWorkload.cpp | 43 |
1 file changed, 43 insertions, 0 deletions
diff --git a/src/backends/neon/workloads/NeonAbsWorkload.cpp b/src/backends/neon/workloads/NeonAbsWorkload.cpp new file mode 100644 index 0000000000..7f8ed5a006 --- /dev/null +++ b/src/backends/neon/workloads/NeonAbsWorkload.cpp @@ -0,0 +1,43 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "NeonAbsWorkload.hpp" + +#include "NeonWorkloadUtils.hpp" + +#include <aclCommon/ArmComputeTensorHandle.hpp> +#include <aclCommon/ArmComputeTensorUtils.hpp> + +#include <boost/cast.hpp> + +namespace armnn +{ + +arm_compute::Status NeonAbsWorkloadValidate(const TensorInfo& input, const TensorInfo& output) +{ + const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input); + const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output); + + return arm_compute::NEAbsLayer::validate(&aclInput, &aclOutput); +} + +NeonAbsWorkload::NeonAbsWorkload(const AbsQueueDescriptor& descriptor, const WorkloadInfo& info) + : BaseWorkload<AbsQueueDescriptor>(descriptor, info) +{ + m_Data.ValidateInputsOutputs("NeonAbsWorkload", 1, 1); + + arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); + arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); + + m_AbsLayer.configure(&input, &output); +} + +void NeonAbsWorkload::Execute() const +{ + ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAbsWorkload_Execute"); + m_AbsLayer.run(); +} + +} // namespace armnn |