From 10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab Mon Sep 17 00:00:00 2001
From: David Beck
Date: Wed, 19 Sep 2018 12:03:20 +0100
Subject: IVGCVSW-1897 : build infrastructure for the src/backends folder

Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
---
 .../NeonWorkloads/NeonSubtractionFloatWorkload.cpp | 46 ++++++++++++++++++++++
 1 file changed, 46 insertions(+)
 create mode 100644 src/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp

diff --git a/src/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp b/src/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp
new file mode 100644
index 0000000000..3f37d82d22
--- /dev/null
+++ b/src/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSubtractionFloatWorkload.hpp"
+#include "backends/ArmComputeTensorUtils.hpp"
+#include "backends/CpuTensorHandle.hpp"
+
+namespace armnn
+{
+
+arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo& input0,
+                                                    const TensorInfo& input1,
+                                                    const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEArithmeticSubtraction::validate(&aclInput0,
+                                                          &aclInput1,
+                                                          &aclOutput,
+                                                          arm_compute::ConvertPolicy::SATURATE);
+}
+
+NeonSubtractionFloatWorkload::NeonSubtractionFloatWorkload(const SubtractionQueueDescriptor& descriptor,
+                                                           const WorkloadInfo& info)
+    : FloatWorkload<SubtractionQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonSubtractionFloatWorkload", 2, 1);
+
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_SubLayer.configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
+}
+
+void NeonSubtractionFloatWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSubtractionFloatWorkload_Execute");
+    m_SubLayer.run();
+}
+
+} //namespace armnn
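
Usage sketch (not part of the patch): the snippet below illustrates how a caller, such as a layer-support check, might consume the new NeonSubtractionWorkloadValidate entry point before a workload factory constructs the workload. The wrapper function IsNeonSubtractionSupported and its tensor parameters are hypothetical names introduced for illustration; only NeonSubtractionWorkloadValidate and the arm_compute status handling come from code that exists in this patch or in the Compute Library.

// Hedged usage sketch, assuming the includes below are available on the
// include path. IsNeonSubtractionSupported is a hypothetical helper;
// NeonSubtractionWorkloadValidate is the function added by this patch.
#include "NeonSubtractionFloatWorkload.hpp"

#include <armnn/Tensor.hpp>
#include <arm_compute/core/Error.h>

bool IsNeonSubtractionSupported(const armnn::TensorInfo& input0,
                                const armnn::TensorInfo& input1,
                                const armnn::TensorInfo& output)
{
    // Ask the Compute Library whether this tensor configuration is supported
    // before any workload is constructed.
    const arm_compute::Status status =
        armnn::NeonSubtractionWorkloadValidate(input0, input1, output);
    return status.error_code() == arm_compute::ErrorCode::OK;
}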