From f540eb8111ce5d241111da487be7d817661e29b4 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Fri, 10 Apr 2020 19:24:55 +0100
Subject: IVGCVSW-3846 Add NEON GATHER Workload

Signed-off-by: Teresa Charlin
Change-Id: I1a66fdad63cef16866d9dfcb8a339647f856e1d4
---
 src/backends/neon/workloads/CMakeLists.txt         |  2 +
 .../neon/workloads/NeonConstantWorkload.cpp        | 10 ++++-
 src/backends/neon/workloads/NeonGatherWorkload.cpp | 46 ++++++++++++++++++++++
 src/backends/neon/workloads/NeonGatherWorkload.hpp | 28 +++++++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp      |  1 +
 5 files changed, 85 insertions(+), 2 deletions(-)
 create mode 100644 src/backends/neon/workloads/NeonGatherWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonGatherWorkload.hpp

(limited to 'src/backends/neon/workloads')

diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 6b3fe67f1f..685d75be12 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -46,6 +46,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonFloorFloatWorkload.hpp
     NeonFullyConnectedWorkload.cpp
     NeonFullyConnectedWorkload.hpp
+    NeonGatherWorkload.cpp
+    NeonGatherWorkload.hpp
     NeonInstanceNormalizationWorkload.cpp
     NeonInstanceNormalizationWorkload.hpp
     NeonL2NormalizationFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f7c8a73f78..05fdcf2fdd 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -23,7 +23,7 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
 {
     const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
 
-    std::array<arm_compute::DataType,8> supportedTypes = {
+    std::array<arm_compute::DataType,9> supportedTypes = {
             arm_compute::DataType::BFLOAT16,
             arm_compute::DataType::F16,
             arm_compute::DataType::F32,
@@ -31,7 +31,8 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
             arm_compute::DataType::QASYMM8_SIGNED,
             arm_compute::DataType::QSYMM16,
             arm_compute::DataType::QSYMM8,
-            arm_compute::DataType::QSYMM8_PER_CHANNEL
+            arm_compute::DataType::QSYMM8_PER_CHANNEL,
+            arm_compute::DataType::S32
     };
 
     auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());
@@ -110,6 +111,11 @@ void NeonConstantWorkload::Execute() const
             CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor(), output);
             break;
         }
+        case arm_compute::DataType::S32:
+        {
+            CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
+            break;
+        }
         default:
         {
             ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.cpp b/src/backends/neon/workloads/NeonGatherWorkload.cpp
new file mode 100644
index 0000000000..2e7c741781
--- /dev/null
+++ b/src/backends/neon/workloads/NeonGatherWorkload.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonGatherWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+#include
+#include
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclIndices = BuildArmComputeTensorInfo(indices);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
+
+    int aclAxis = ComputeAclAxis(0, input);
+
+    return arm_compute::NEGather::validate(&aclInput, &aclIndices, &aclOutput, aclAxis);
+}
+
+NeonGatherWorkload::NeonGatherWorkload(const GatherQueueDescriptor& descriptor,
+                                       const WorkloadInfo& info)
+        : BaseWorkload<GatherQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonGatherWorkload", 1, 1);
+
+    arm_compute::ITensor& input = PolymorphicDowncast(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& indices = PolymorphicDowncast(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast(m_Data.m_Outputs[0])->GetTensor();
+
+    int aclAxis = ComputeAclAxis(0, info.m_InputTensorInfos[0]);
+
+    m_Layer.configure(&input, &indices, &output, aclAxis);
+}
+
+void NeonGatherWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonGatherWorkload_Execute");
+    m_Layer.run();
+}
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.hpp b/src/backends/neon/workloads/NeonGatherWorkload.hpp
new file mode 100644
index 0000000000..b1b47a5069
--- /dev/null
+++ b/src/backends/neon/workloads/NeonGatherWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include
+
+#include
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output);
+
+class NeonGatherWorkload : public BaseWorkload<GatherQueueDescriptor>
+{
+public:
+    NeonGatherWorkload(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEGather m_Layer;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 2da6ea0c01..243f5a46ee 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -26,6 +26,7 @@
 #include "NeonExpWorkload.hpp"
 #include "NeonFloorFloatWorkload.hpp"
 #include "NeonFullyConnectedWorkload.hpp"
+#include "NeonGatherWorkload.hpp"
 #include "NeonInstanceNormalizationWorkload.hpp"
 #include "NeonL2NormalizationFloatWorkload.hpp"
 #include "NeonLstmFloatWorkload.hpp"
-- 
cgit v1.2.1
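
For readers who want to try the change out, the short C++ sketch below shows one way to probe the new NeonGatherWorkloadValidate helper directly. It is not part of the patch: the tensor shapes and values are invented for illustration, the include path for the internal NeonGatherWorkload.hpp header is an assumption about the build setup, and in the real backend the equivalent check is made through the Neon layer-support and workload-factory hookup, which this view (limited to 'src/backends/neon/workloads') does not include.

// Illustrative sketch only (not part of the patch): asks the Arm Compute Library
// whether NEGather can handle a given params/indices/output combination by
// calling the NeonGatherWorkloadValidate helper added in this commit.
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Assumption: src/backends is on the include path, as it is for ArmNN's own
// backend code; this is an internal header, not part of the public ArmNN API.
#include <neon/workloads/NeonGatherWorkload.hpp>

#include <iostream>

int main()
{
    using namespace armnn;

    // Example shapes: gather 2 rows out of a 4x3 "params" tensor along axis 0.
    TensorInfo paramsInfo({ 4, 3 }, DataType::Float32);
    TensorInfo indicesInfo({ 2 }, DataType::Signed32);
    TensorInfo outputInfo({ 2, 3 }, DataType::Float32);

    // The helper converts the armnn::TensorInfo objects to ACL tensor infos and
    // forwards them to arm_compute::NEGather::validate.
    arm_compute::Status status = NeonGatherWorkloadValidate(paramsInfo, indicesInfo, outputInfo);

    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        std::cout << "Gather supported on the NEON backend" << std::endl;
    }
    else
    {
        std::cout << "Gather not supported: " << status.error_description() << std::endl;
    }
    return 0;
}

In normal use this function is not called directly: the optimizer queries it via the Neon backend's layer-support code before assigning a Gather layer to CpuAcc, and the workload factory then constructs a NeonGatherWorkload whose Execute() runs the configured NEGather function. That wiring sits outside the filtered paths shown in this patch view.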