From 036f02d859c307f514c77e91665fbff05048ba11 Mon Sep 17 00:00:00 2001
From: josh minor
Date: Fri, 15 Nov 2019 14:53:22 -0600
Subject: IVGCVSW-3729 Added neon slice workload and supporting neon layer tests

* Support added for ACL neon slice workload
* Utility function created to translate ArmNN slice layer params to ACL neon
  slice layer equivalent (see the sketch appended after the patch)
* Neon slice layer tests added as per SliceTestImpl.hpp

Signed-off-by: josh minor
Change-Id: Id583465311879af139e8e977f16ed2280c937ac7
---
 src/backends/neon/NeonLayerSupport.cpp            | 13 ++++++
 src/backends/neon/NeonLayerSupport.hpp            |  5 ++
 src/backends/neon/NeonWorkloadFactory.cpp         |  6 +++
 src/backends/neon/NeonWorkloadFactory.hpp         |  3 ++
 src/backends/neon/backend.mk                      |  1 +
 src/backends/neon/test/NeonLayerTests.cpp         | 14 ++++++
 src/backends/neon/workloads/CMakeLists.txt        |  2 +
 src/backends/neon/workloads/NeonSliceWorkload.cpp | 57 +++++++++++++++++++++++
 src/backends/neon/workloads/NeonSliceWorkload.hpp | 31 ++++++++++++
 src/backends/neon/workloads/NeonWorkloadUtils.hpp | 24 ++++++++++
 src/backends/neon/workloads/NeonWorkloads.hpp     |  1 +
 11 files changed, 157 insertions(+)
 create mode 100644 src/backends/neon/workloads/NeonSliceWorkload.cpp
 create mode 100644 src/backends/neon/workloads/NeonSliceWorkload.hpp

diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0eeb948d3f..5410ef42df 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -46,6 +46,7 @@
 #include "workloads/NeonQuantizedLstmWorkload.hpp"
 #include "workloads/NeonResizeWorkload.hpp"
 #include "workloads/NeonRsqrtWorkload.hpp"
+#include "workloads/NeonSliceWorkload.hpp"
 #include "workloads/NeonSoftmaxBaseWorkload.hpp"
 #include "workloads/NeonSpaceToDepthWorkload.hpp"
 #include "workloads/NeonSplitterWorkload.hpp"
@@ -619,6 +620,18 @@ bool NeonLayerSupport::IsRsqrtSupported(const TensorInfo& input,
     FORWARD_WORKLOAD_VALIDATE_FUNC(NeonRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
 }
 
+bool NeonLayerSupport::IsSliceSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        const SliceDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSliceWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input,
+                                   output,
+                                   descriptor);
+}
+
 bool NeonLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 9c1ca2a729..5d4fbad97f 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -218,6 +218,11 @@ public:
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsSliceSupported(const TensorInfo& input,
+                          const TensorInfo& output,
+                          const SliceDescriptor& descriptor,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsSoftmaxSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index dda1d7a132..dd11af4484 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -428,6 +428,12 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDesc
     return std::make_unique<NeonRsqrtWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
+    return std::make_unique<NeonSliceWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info) const
 {
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index f876092c45..89a495985e 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -188,6 +188,9 @@ public:
     std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
                                            const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index c66f5da6a5..23289345ea 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -54,6 +54,7 @@ BACKEND_SOURCES := \
         workloads/NeonReshapeWorkload.cpp \
         workloads/NeonResizeWorkload.cpp \
         workloads/NeonRsqrtWorkload.cpp \
+        workloads/NeonSliceWorkload.cpp \
         workloads/NeonSoftmaxBaseWorkload.cpp \
         workloads/NeonSoftmaxFloatWorkload.cpp \
         workloads/NeonSoftmaxUint8Workload.cpp \
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index ea9c813d34..cd9a55d3ee 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -756,6 +756,20 @@ ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
                      ResizeNearestNeighborMagTest<DataType::QuantisedAsymm8>,
                      DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 
+// Slice
+ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+
 // Strided Slice
 ARMNN_AUTO_TEST_CASE(StridedSlice4dFloat32, StridedSlice4dFloat32Test)
 ARMNN_AUTO_TEST_CASE(StridedSlice4dReverseFloat32, StridedSlice4dReverseFloat32Test)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 46e4928882..19ac9c9d00 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -70,6 +70,8 @@ list(APPEND armnnNeonBackendWorkloads_sources
     NeonResizeWorkload.hpp
     NeonRsqrtWorkload.cpp
     NeonRsqrtWorkload.hpp
+    NeonSliceWorkload.cpp
+    NeonSliceWorkload.hpp
     NeonSoftmaxBaseWorkload.cpp
     NeonSoftmaxBaseWorkload.hpp
     NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonSliceWorkload.cpp b/src/backends/neon/workloads/NeonSliceWorkload.cpp
new file mode 100644
index 0000000000..171edc6c59
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSliceWorkload.cpp
@@ -0,0 +1,57 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSliceWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <neon/NeonTensorHandle.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              const SliceDescriptor& descriptor)
+{
+    const arm_compute::TensorInfo aclInputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    std::tie(starts, ends) = SetNeonSliceData(descriptor.m_Begin, descriptor.m_Size);
+
+    return arm_compute::NESlice::validate(&aclInputInfo, &aclOutputInfo, starts, ends);
+}
+
+NeonSliceWorkload::NeonSliceWorkload(const SliceQueueDescriptor& descriptor,
+                                     const WorkloadInfo& info)
+    : BaseWorkload<SliceQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonSliceWorkload", 1, 1);
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    std::tie(starts, ends) = SetNeonSliceData(m_Data.m_Parameters.m_Begin, m_Data.m_Parameters.m_Size);
+
+    m_SliceFunction.configure(&input, &output, starts, ends);
+}
+
+void NeonSliceWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSliceWorkload_Execute");
+    m_SliceFunction.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonSliceWorkload.hpp b/src/backends/neon/workloads/NeonSliceWorkload.hpp
new file mode 100644
index 0000000000..d1f32a9d7e
--- /dev/null
+++ b/src/backends/neon/workloads/NeonSliceWorkload.hpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/core/Types.h>
+#include <arm_compute/runtime/NEON/functions/NESlice.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSliceWorkloadValidate(const TensorInfo& input,
+                                              const TensorInfo& output,
+                                              const SliceDescriptor& descriptor);
+
+class NeonSliceWorkload : public BaseWorkload<SliceQueueDescriptor>
+{
+public:
+    NeonSliceWorkload(const SliceQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NESlice m_SliceFunction;
+};
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 8cf97d3909..f63946ec07 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -76,4 +76,28 @@ inline auto SetNeonStridedSliceData(const std::vector<int>& m_begin,
     return std::make_tuple(starts, ends, strides);
 }
 
+inline auto SetNeonSliceData(const std::vector<unsigned int>& m_begin,
+                             const std::vector<unsigned int>& m_size)
+{
+    // This function must translate the size vector given to an end vector
+    // expected by the ACL NESlice workload
+    arm_compute::Coordinates starts;
+    arm_compute::Coordinates ends;
+
+    unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
+
+    // For strided slices, we have the relationship size = (end - begin) / stride
+    // For slice, we assume stride to be a vector of all ones, yielding the formula
+    // size = (end - begin) therefore we know end = size + begin
+    for (unsigned int i = 0; i < num_dims; i++)
+    {
+        unsigned int revertedIndex = num_dims - i - 1;
+
+        starts.set(i, static_cast<int>(m_begin[revertedIndex]));
+        ends.set(i, static_cast<int>(m_begin[revertedIndex] + m_size[revertedIndex]));
+    }
+
+    return std::make_tuple(starts, ends);
+}
+
 } //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 7d99d260a6..77d819e3ab 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -37,6 +37,7 @@
 #include "NeonReshapeWorkload.hpp"
 #include "NeonResizeWorkload.hpp"
 #include "NeonRsqrtWorkload.hpp"
+#include "NeonSliceWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
 #include "NeonSoftmaxUint8Workload.hpp"
 #include "NeonSpaceToDepthWorkload.hpp"
-- 
cgit v1.2.1
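
To make the coordinate translation concrete: ArmNN describes a slice as per-dimension (begin, size) pairs, while ACL's NESlice expects (start, end) coordinates with the dimension order reversed. The standalone sketch below mirrors what SetNeonSliceData does, using plain std::vector in place of arm_compute::Coordinates so it compiles without ACL; the function name TranslateSlice and the example values are illustrative only.

// Standalone illustration of the SetNeonSliceData translation: (begin, size)
// per dimension becomes (start, end) per dimension, indexed back-to-front
// because ACL coordinates run fastest-varying dimension first.
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

std::pair<std::vector<int>, std::vector<int>>
TranslateSlice(const std::vector<unsigned int>& begin, const std::vector<unsigned int>& size)
{
    const std::size_t numDims = begin.size();
    std::vector<int> starts(numDims);
    std::vector<int> ends(numDims);
    for (std::size_t i = 0; i < numDims; ++i)
    {
        // Reverse the dimension order for ACL.
        const std::size_t reverted = numDims - i - 1;
        starts[i] = static_cast<int>(begin[reverted]);
        // With an implicit stride of 1, end = begin + size.
        ends[i] = static_cast<int>(begin[reverted] + size[reverted]);
    }
    return {starts, ends};
}

int main()
{
    // Hypothetical NHWC slice: begin = {0, 1, 1, 0}, size = {1, 2, 2, 3}.
    const auto coords = TranslateSlice({0, 1, 1, 0}, {1, 2, 2, 3});
    for (std::size_t i = 0; i < coords.first.size(); ++i)
    {
        std::cout << "dim " << i << ": start " << coords.first[i]
                  << ", end " << coords.second[i] << "\n";
    }
    // Prints starts {0, 1, 1, 0} and ends {3, 3, 3, 1}, i.e. the original
    // dimensions in reversed order with end = begin + size.
    return 0;
}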
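
A rough sketch of how the new validation hook might be exercised directly follows, assuming ArmNN 19.11-era headers and a default-constructible SliceDescriptor; the include paths, shapes and values are illustrative, and in a real application support is normally queried through the backend's ILayerSupport interface (here via IsSliceSupported) rather than by calling the workload validate function.

// Hypothetical probe of Neon slice support via the validate function added in this patch.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonSliceWorkload.hpp" // declares NeonSliceWorkloadValidate (added by this patch)

bool IsThisSliceSupportedOnNeon()
{
    // Slice a 1x4x4x3 tensor down to 1x2x2x3, starting at (0, 1, 1, 0).
    const armnn::TensorInfo input({1, 4, 4, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo output({1, 2, 2, 3}, armnn::DataType::Float32);

    armnn::SliceDescriptor descriptor;
    descriptor.m_Begin = {0, 1, 1, 0}; // first element of the slice in each dimension
    descriptor.m_Size  = {1, 2, 2, 3}; // number of elements taken in each dimension

    const arm_compute::Status status = armnn::NeonSliceWorkloadValidate(input, output, descriptor);
    return status.error_code() == arm_compute::ErrorCode::OK;
}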