about summary refs log tree commit diff
diff options
context:
space:
mode:
authorkeidav01 <keith.davis@arm.com>2018-12-10 18:16:07 +0000
committerKeith Davis Arm <keith.davis@arm.com>2018-12-18 10:47:58 +0000
commitd74dc91af2d1302bf9024fcb1690d5df035f9c15 (patch)
tree0da683c1beb5cfc0c4264bc1d00bac4bf462f73a
parent2099595bac339c953bf4291be12703c8845e916e (diff)
downloadarmnn-d74dc91af2d1302bf9024fcb1690d5df035f9c15.tar.gz
IVGCVSW-2131 Implementation of StridedSlice operation
* Added ACl Implementation to ArmNN * Added helper function for setting CL Strided Slice arguments Change-Id: Ie10d387fdb054027ea9b8782743d270bb72949c1
-rw-r--r--src/backends/cl/ClLayerSupport.cpp13
-rw-r--r--src/backends/cl/ClLayerSupport.hpp5
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp2
-rw-r--r--src/backends/cl/backend.mk1
-rwxr-xr-xsrc/backends/cl/test/ClLayerTests.cpp21
-rw-r--r--src/backends/cl/workloads/CMakeLists.txt2
-rw-r--r--src/backends/cl/workloads/ClStridedSliceWorkload.cpp92
-rw-r--r--src/backends/cl/workloads/ClStridedSliceWorkload.hpp32
-rw-r--r--src/backends/cl/workloads/ClWorkloadUtils.hpp21
-rw-r--r--src/backends/cl/workloads/ClWorkloads.hpp1
10 files changed, 189 insertions, 1 deletion
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index a877a5f25f..eebab352e1 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -35,6 +35,7 @@
#include "workloads/ClPermuteWorkload.hpp"
#include "workloads/ClPooling2dWorkload.hpp"
#include "workloads/ClSoftmaxBaseWorkload.hpp"
+#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#endif
@@ -497,6 +498,18 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
&TrueFunc<>);
}
+bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClStridedSliceWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output,
+ descriptor);
+}
+
bool ClLayerSupport::IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index cd1b72286a..470fa2acb4 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -169,6 +169,11 @@ public:
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStridedSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsSubtractionSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 16ca2e6e41..942defb256 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -329,7 +329,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateBatchToSpaceNd(const BatchTo
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<ClStridedSliceWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index f0019790b0..71b600830a 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -40,6 +40,7 @@ BACKEND_SOURCES := \
workloads/ClSoftmaxBaseWorkload.cpp \
workloads/ClSoftmaxFloatWorkload.cpp \
workloads/ClSoftmaxUint8Workload.cpp \
+ workloads/ClStridedSliceWorkload.cpp \
workloads/ClSubtractionWorkload.cpp
# BACKEND_TEST_SOURCES contains the list of files to be included
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 10c177c8b8..90b93d70c8 100755
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -351,6 +351,27 @@ ARMNN_AUTO_TEST_CASE(MeanVtsFloat1, MeanVtsFloat1Test)
ARMNN_AUTO_TEST_CASE(MeanVtsFloat2, MeanVtsFloat2Test)
ARMNN_AUTO_TEST_CASE(MeanVtsFloat3, MeanVtsFloat3Test)
+// Strided Slice
+ARMNN_AUTO_TEST_CASE(StridedSlice4DFloat32, StridedSlice4DFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice4DReverseFloat32, StridedSlice4DReverseFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSliceSimpleStrideFloat32, StridedSliceSimpleStrideFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSliceSimpleRangeMaskFloat32, StridedSliceSimpleRangeMaskFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskFloat32, StridedSliceShrinkAxisMaskFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice3DFloat32, StridedSlice3DFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice3DReverseFloat32, StridedSlice3DReverseFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice2DFloat32, StridedSlice2DFloat32Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseFloat32, StridedSlice2DReverseFloat32Test)
+
+ARMNN_AUTO_TEST_CASE(StridedSlice4DUint8, StridedSlice4DUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice4DReverseUint8, StridedSlice4DReverseUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSliceSimpleStrideUint8, StridedSliceSimpleStrideUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSliceSimpleRangeMaskUint8, StridedSliceSimpleRangeMaskUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSliceShrinkAxisMaskUint8, StridedSliceShrinkAxisMaskUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice3DUint8, StridedSlice3DUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice3DReverseUint8, StridedSlice3DReverseUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice2DUint8, StridedSlice2DUint8Test)
+ARMNN_AUTO_TEST_CASE(StridedSlice2DReverseUint8, StridedSlice2DReverseUint8Test)
+
// ============================================================================
// COMPARE tests
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index b7571f6895..11a0fd2db2 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -57,6 +57,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClSoftmaxUint8Workload.cpp
ClSoftmaxUint8Workload.hpp
ClSplitterWorkload.hpp
+ ClStridedSliceWorkload.cpp
+ ClStridedSliceWorkload.hpp
ClSubtractionWorkload.cpp
ClSubtractionWorkload.hpp
ClWorkloads.hpp
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.cpp b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
new file mode 100644
index 0000000000..e51fa34233
--- /dev/null
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.cpp
@@ -0,0 +1,92 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClStridedSliceWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <cl/ClLayerSupport.hpp>
+#include <cl/ClTensorHandle.hpp>
+#include <cl/ClLayerSupport.hpp>
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
+
+ arm_compute::Coordinates starts;
+ arm_compute::Coordinates ends;
+ arm_compute::Coordinates strides;
+
+ std::tie(starts, ends, strides) = SetClStridedSliceData(descriptor.m_Begin, descriptor.m_End, descriptor.m_Stride);
+
+ int32_t begin_mask = descriptor.m_BeginMask;
+ int32_t end_mask = descriptor.m_EndMask;
+ int32_t shrink_axis_mask = descriptor.m_ShrinkAxisMask;
+
+ return arm_compute::CLStridedSlice::validate(&aclInputInfo,
+ &aclOutputInfo,
+ starts,
+ ends,
+ strides,
+ begin_mask,
+ end_mask,
+ shrink_axis_mask);
+}
+
+ClStridedSliceWorkload::ClStridedSliceWorkload(const StridedSliceQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<StridedSliceQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("ClStridedSliceWorkload", 1, 1);
+
+ arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ arm_compute::Coordinates starts;
+ arm_compute::Coordinates ends;
+ arm_compute::Coordinates strides;
+
+ std::tie(starts, ends, strides) = SetClStridedSliceData(m_Data.m_Parameters.m_Begin,
+ m_Data.m_Parameters.m_End,
+ m_Data.m_Parameters.m_Stride);
+
+ int32_t begin_mask = m_Data.m_Parameters.m_BeginMask;
+ int32_t end_mask = m_Data.m_Parameters.m_EndMask;
+ int32_t shrink_axis_mask = m_Data.m_Parameters.m_ShrinkAxisMask;
+
+ arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+ input.info()->set_data_layout(aclDataLayout);
+ output.info()->set_data_layout(aclDataLayout);
+
+ m_StridedSliceLayer.configure(&input,
+ &output,
+ starts,
+ ends,
+ strides,
+ begin_mask,
+ end_mask,
+ shrink_axis_mask);
+}
+
+void ClStridedSliceWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClStridedSliceWorkload_Execute");
+ RunClFunction(m_StridedSliceLayer, CHECK_LOCATION());
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClStridedSliceWorkload.hpp b/src/backends/cl/workloads/ClStridedSliceWorkload.hpp
new file mode 100644
index 0000000000..617ec7c021
--- /dev/null
+++ b/src/backends/cl/workloads/ClStridedSliceWorkload.hpp
@@ -0,0 +1,32 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+#include <armnn/Descriptors.hpp>
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClStridedSliceWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const StridedSliceDescriptor& descriptor);
+
+class ClStridedSliceWorkload : public BaseWorkload<StridedSliceQueueDescriptor>
+{
+public:
+ ClStridedSliceWorkload(const StridedSliceQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void Execute() const override;
+
+private:
+ mutable arm_compute::CLStridedSlice m_StridedSliceLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 3bee2427f0..32dacdfc61 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -39,6 +39,27 @@ void CopyArmComputeClTensorData(arm_compute::CLTensor& dstTensor, const T* srcDa
dstTensor.unmap();
}
+inline auto SetClStridedSliceData(const std::vector<int>& m_begin,
+ const std::vector<int>& m_end,
+ const std::vector<int>& m_stride)
+{
+ arm_compute::Coordinates starts;
+ arm_compute::Coordinates ends;
+ arm_compute::Coordinates strides;
+
+ unsigned int num_dims = static_cast<unsigned int>(m_begin.size());
+
+ for (unsigned int i = 0; i < num_dims; i++) {
+ unsigned int revertedIndex = num_dims - i - 1;
+
+ starts.set(i, static_cast<int>(m_begin[revertedIndex]));
+ ends.set(i, static_cast<int>(m_end[revertedIndex]));
+ strides.set(i, static_cast<int>(m_stride[revertedIndex]));
+ }
+
+ return std::make_tuple(starts, ends, strides);
+}
+
inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
const ConstCpuTensorHandle* handle)
{
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index 39ecc53dc3..9e74327717 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -28,6 +28,7 @@
#include "ClSoftmaxFloatWorkload.hpp"
#include "ClSoftmaxUint8Workload.hpp"
#include "ClSplitterWorkload.hpp"
+#include "ClStridedSliceWorkload.hpp"
#include "ClSubtractionWorkload.hpp"
#include "ClConvertFp16ToFp32Workload.hpp"
#include "ClConvertFp32ToFp16Workload.hpp"