author     Teresa Charlin <teresa.charlinreyes@arm.com>   2020-04-10 19:24:55 +0100
committer  TeresaARM <teresa.charlinreyes@arm.com>        2020-05-29 16:21:58 +0000
commit     f540eb8111ce5d241111da487be7d817661e29b4 (patch)
tree       6a22c7f0076cce9a0e304120d5b1f4c0d1b4cfc0
parent     3940d8bfcaad963e95b882984a93ccd69455822a (diff)
IVGCVSW-3846 Add NEON GATHER Workload
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I1a66fdad63cef16866d9dfcb8a339647f856e1d4
-rw-r--r--  src/backends/aclCommon/ArmComputeUtils.hpp              | 15
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                  | 13
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp                  |  5
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp               |  2
-rw-r--r--  src/backends/neon/backend.mk                            |  1
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp               |  6
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt              |  2
-rw-r--r--  src/backends/neon/workloads/NeonConstantWorkload.cpp    | 10
-rw-r--r--  src/backends/neon/workloads/NeonGatherWorkload.cpp      | 46
-rw-r--r--  src/backends/neon/workloads/NeonGatherWorkload.hpp      | 28
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp           |  1
11 files changed, 126 insertions(+), 3 deletions(-)
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index d8818ce209..0ee13b3e7f 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -186,4 +186,19 @@ inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor&
return splitAxis;
}
+/// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-dim, dim)
+inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
+{
+    int dim = static_cast<int>(tensor.GetNumDimensions());
+
+    ARMNN_ASSERT(dim != 0);
+    ARMNN_ASSERT((-1 * dim) <= armnnAxis);
+    ARMNN_ASSERT(armnnAxis < dim);
+
+    int sign = (armnnAxis < 0) ? -1 : 1;
+    int aclAxis = sign * dim - 1 - armnnAxis;
+
+    return aclAxis;
+}
+
} // namespace armnn
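A quick worked check of the axis conversion added above (illustrative only, not part of the patch). The helper below is a hypothetical standalone copy of the same arithmetic, assuming a 4-dimensional tensor (dim = 4):

#include <cassert>

// Hypothetical standalone re-statement of the ComputeAclAxis arithmetic above.
// ArmNN numbers axes left to right; ACL numbers them right to left.
int ToAclAxis(int armnnAxis, int dim)
{
    int sign = (armnnAxis < 0) ? -1 : 1;
    return sign * dim - 1 - armnnAxis;
}

int main()
{
    assert(ToAclAxis( 0, 4) ==  3);  // leftmost ArmNN axis is the rightmost ACL axis
    assert(ToAclAxis( 1, 4) ==  2);
    assert(ToAclAxis(-1, 4) == -4);  // -4 wraps to ACL axis 0 and stays inside [-dim, dim)
    return 0;
}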
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 53d0f0b633..a514c8fae2 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -44,6 +44,7 @@
#include "workloads/NeonNegWorkload.hpp"
#include "workloads/NeonNormalizationFloatWorkload.hpp"
#include "workloads/NeonFullyConnectedWorkload.hpp"
+#include "workloads/NeonGatherWorkload.hpp"
#include "workloads/NeonPadWorkload.hpp"
#include "workloads/NeonPermuteWorkload.hpp"
#include "workloads/NeonPooling2dWorkload.hpp"
@@ -435,6 +436,18 @@ bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
descriptor);
}
+bool NeonLayerSupport::IsGatherSupported(const TensorInfo& input0,
+                                         const TensorInfo& input1,
+                                         const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonGatherWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
bool NeonLayerSupport::IsGreaterSupported(const armnn::TensorInfo& input0,
const armnn::TensorInfo& input1,
const armnn::TensorInfo& output,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index adb1891de8..25817822fd 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -123,6 +123,11 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+    bool IsGatherSupported(const TensorInfo& input0,
+                           const TensorInfo& input1,
+                           const TensorInfo& output,
+                           Optional<std::string&> reasonIfUnsupported) const override;
+
ARMNN_DEPRECATED_MSG("Use IsComparisonSupported instead")
bool IsGreaterSupported(const TensorInfo& input0,
const TensorInfo& input1,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index ee0e70304b..35082f784c 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -280,7 +280,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateFullyConnected(
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::GatherQueueDescriptor& descriptor,
const armnn::WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+ return std::make_unique<NeonGatherWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 225687f158..9e9c38cdec 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -42,6 +42,7 @@ BACKEND_SOURCES := \
workloads/NeonExpWorkload.cpp \
workloads/NeonFloorFloatWorkload.cpp \
workloads/NeonFullyConnectedWorkload.cpp \
+ workloads/NeonGatherWorkload.cpp \
workloads/NeonInstanceNormalizationWorkload.cpp \
workloads/NeonL2NormalizationFloatWorkload.cpp \
workloads/NeonLstmFloatWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 0e0558b763..1ac2c61104 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -673,6 +673,12 @@ ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefault
// Floor
ARMNN_AUTO_TEST_CASE(SimpleFloor, SimpleFloorTest<DataType::Float32>)
+// Gather
+ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+
// Equal
ARMNN_AUTO_TEST_CASE(EqualSimple, EqualSimpleTest)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 6b3fe67f1f..685d75be12 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -46,6 +46,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonFloorFloatWorkload.hpp
NeonFullyConnectedWorkload.cpp
NeonFullyConnectedWorkload.hpp
+ NeonGatherWorkload.cpp
+ NeonGatherWorkload.hpp
NeonInstanceNormalizationWorkload.cpp
NeonInstanceNormalizationWorkload.hpp
NeonL2NormalizationFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f7c8a73f78..05fdcf2fdd 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -23,7 +23,7 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
{
const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
- std::array<arm_compute::DataType,8> supportedTypes = {
+ std::array<arm_compute::DataType,9> supportedTypes = {
arm_compute::DataType::BFLOAT16,
arm_compute::DataType::F16,
arm_compute::DataType::F32,
@@ -31,7 +31,8 @@ arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
arm_compute::DataType::QASYMM8_SIGNED,
arm_compute::DataType::QSYMM16,
arm_compute::DataType::QSYMM8,
- arm_compute::DataType::QSYMM8_PER_CHANNEL
+ arm_compute::DataType::QSYMM8_PER_CHANNEL,
+ arm_compute::DataType::S32
};
auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());
@@ -110,6 +111,11 @@ void NeonConstantWorkload::Execute() const
CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
break;
}
+ case arm_compute::DataType::S32:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int32_t>(), output);
+ break;
+ }
default:
{
ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.cpp b/src/backends/neon/workloads/NeonGatherWorkload.cpp
new file mode 100644
index 0000000000..2e7c741781
--- /dev/null
+++ b/src/backends/neon/workloads/NeonGatherWorkload.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonGatherWorkload.hpp"
+#include "NeonWorkloadUtils.hpp"
+#include <armnn/utility/PolymorphicDowncast.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclIndices = BuildArmComputeTensorInfo(indices);
+    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
+
+    int aclAxis = ComputeAclAxis(0, input);
+
+    return arm_compute::NEGather::validate(&aclInput, &aclIndices, &aclOutput, aclAxis);
+}
+
+NeonGatherWorkload::NeonGatherWorkload(const GatherQueueDescriptor& descriptor,
+                                       const WorkloadInfo& info)
+    : BaseWorkload<GatherQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonGatherWorkload", 2, 1);
+
+    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& indices = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    int aclAxis = ComputeAclAxis(0, info.m_InputTensorInfos[0]);
+
+    m_Layer.configure(&input, &indices, &output, aclAxis);
+}
+
+void NeonGatherWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonGatherWorkload_Execute");
+    m_Layer.run();
+}
+} //namespace armnn
\ No newline at end of file
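A minimal sketch (not part of the commit) of how the new support check could be queried once this backend code is built; the tensor shapes, include paths, and main() harness below are illustrative assumptions:

#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <neon/NeonLayerSupport.hpp>   // assumed include path, relative to src/backends

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    // Gather 4 rows out of a 5x3 parameter tensor along axis 0 (the axis the workload hard-codes).
    TensorInfo params ({ 5, 3 }, DataType::Float32);
    TensorInfo indices({ 4 },    DataType::Signed32);
    TensorInfo output ({ 4, 3 }, DataType::Float32);

    NeonLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsGatherSupported(params, indices, output,
                                                    Optional<std::string&>(reason));
    std::cout << "NEON Gather supported: " << supported << " " << reason << std::endl;
    return 0;
}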
diff --git a/src/backends/neon/workloads/NeonGatherWorkload.hpp b/src/backends/neon/workloads/NeonGatherWorkload.hpp
new file mode 100644
index 0000000000..b1b47a5069
--- /dev/null
+++ b/src/backends/neon/workloads/NeonGatherWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEGather.h>
+
+namespace armnn
+{
+arm_compute::Status NeonGatherWorkloadValidate(const TensorInfo& input,
+                                               const TensorInfo& indices,
+                                               const TensorInfo& output);
+
+class NeonGatherWorkload : public BaseWorkload<GatherQueueDescriptor>
+{
+public:
+    NeonGatherWorkload(const GatherQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEGather m_Layer;
+};
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 2da6ea0c01..243f5a46ee 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -26,6 +26,7 @@
#include "NeonExpWorkload.hpp"
#include "NeonFloorFloatWorkload.hpp"
#include "NeonFullyConnectedWorkload.hpp"
+#include "NeonGatherWorkload.hpp"
#include "NeonInstanceNormalizationWorkload.hpp"
#include "NeonL2NormalizationFloatWorkload.hpp"
#include "NeonLstmFloatWorkload.hpp"