path: root/src/backends/reference
author     Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2019-09-17 17:27:04 +0100
committer  Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>   2019-09-18 11:09:02 +0100
commit     92b9f87099260178d2a3d61a42af2a86762eaca7 (patch)
tree       4556a8a8d86f46cc36b9a6b305bcaa49bf8f057d /src/backends/reference
parent     44a0142b17492a5af4cfa28b08c6763e463e6ca3 (diff)
download   armnn-92b9f87099260178d2a3d61a42af2a86762eaca7.tar.gz
IVGCVSW-3878 Add reference workload for SLICE
* Added reference workload implementation and layer tests for all supported tensor dimensions (1d, 2d, 3d, 4d)

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I40eb300828933e9183027281105d1a7e597d1569
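For reference, SLICE extracts a contiguous sub-tensor: m_Begin gives the start index in every dimension and m_Size the number of elements to keep, so the output shape equals m_Size. Below is a minimal illustrative sketch (not part of this change; the shapes and values are made up, and the public SliceDescriptor/TensorInfo constructors are assumed):

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>

// Slice a 2x4x3 Float32 tensor: keep 1x2x3 elements starting at index (1, 1, 0).
armnn::SliceDescriptor MakeExampleSliceDescriptor()
{
    armnn::SliceDescriptor descriptor({ 1, 1, 0 },   // m_Begin: first element per dimension
                                      { 1, 2, 3 });  // m_Size:  element count per dimension

    // With input TensorInfo({ 2, 4, 3 }, armnn::DataType::Float32),
    // the matching output TensorInfo is ({ 1, 2, 3 }, armnn::DataType::Float32).
    return descriptor;
}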
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp             | 27
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp             |  5
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp          |  6
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp          |  3
-rw-r--r--  src/backends/reference/backend.mk                      |  2
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp          | 16
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt        |  4
-rw-r--r--  src/backends/reference/workloads/RefSliceWorkload.cpp  | 29
-rw-r--r--  src/backends/reference/workloads/RefSliceWorkload.hpp  | 22
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp      |  5
-rw-r--r--  src/backends/reference/workloads/Slice.cpp             | 95
-rw-r--r--  src/backends/reference/workloads/Slice.hpp             | 21
12 files changed, 233 insertions, 2 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 228f8a8809..572f617636 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1374,6 +1374,33 @@ bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(descriptor);
+ bool supported = true;
+
+ std::array<DataType, 3> supportedTypes =
+ {
+ DataType::Float32,
+ DataType::QuantisedAsymm8,
+ DataType::QuantisedSymm16
+ };
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference Slice: input type not supported");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference Slice: output type not supported");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "Reference Slice: input and output types are mismatched");
+
+ return supported;
+}
+
bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
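The new support check can be exercised directly against the reference backend. A minimal sketch of such a query (illustrative only; the internal RefLayerSupport.hpp include path and the concrete shapes are assumptions, not part of this commit):

#include <armnn/Descriptors.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>

#include <reference/RefLayerSupport.hpp> // internal backend header; path assumed

#include <iostream>
#include <string>

int main()
{
    using namespace armnn;

    TensorInfo input({ 1, 2, 4, 3 }, DataType::QuantisedAsymm8);
    TensorInfo output({ 1, 1, 2, 3 }, DataType::QuantisedAsymm8);
    SliceDescriptor descriptor({ 0, 1, 1, 0 }, { 1, 1, 2, 3 });

    RefLayerSupport layerSupport;
    std::string reason;
    bool supported = layerSupport.IsSliceSupported(input, output, descriptor,
                                                   Optional<std::string&>(reason));

    std::cout << (supported ? "Slice supported" : reason) << std::endl;
    return supported ? 0 : 1;
}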
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 26c60dcf30..8200058633 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -219,6 +219,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsSliceSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const SliceDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const SoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 52dffcc1f8..055c8da600 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -457,4 +457,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescripto
return std::make_unique<RefAbsWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<RefSliceWorkload>(descriptor, info);
+}
+
} // namespace armnn
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 5851528f4a..2c40053f73 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -212,6 +212,9 @@ public:
std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateSlice(const SliceQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
private:
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index b1f0a03782..b2ec7488e2 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -69,6 +69,7 @@ BACKEND_SOURCES := \
workloads/RefResizeBilinearWorkload.cpp \
workloads/RefResizeWorkload.cpp \
workloads/RefRsqrtWorkload.cpp \
+ workloads/RefSliceWorkload.cpp \
workloads/RefSoftmaxWorkload.cpp \
workloads/RefSpaceToBatchNdWorkload.cpp \
workloads/RefSpaceToDepthWorkload.cpp \
@@ -78,6 +79,7 @@ BACKEND_SOURCES := \
workloads/RefTransposeConvolution2dWorkload.cpp \
workloads/Resize.cpp \
workloads/Rsqrt.cpp \
+ workloads/Slice.cpp \
workloads/SpaceToBatchNd.cpp \
workloads/SpaceToDepth.cpp \
workloads/Stack.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index eb56dde884..afeb8a458a 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1252,6 +1252,22 @@ ARMNN_AUTO_TEST_CASE(PreluFloat16, PreluTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE(PreluUint8, PreluTest<DataType::QuantisedAsymm8>)
ARMNN_AUTO_TEST_CASE(PreluInt16, PreluTest<DataType::QuantisedSymm16>)
+// Slice
+ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
+
+ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
+ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
+
+ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
+ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+
// TransposeConvolution2d
ARMNN_AUTO_TEST_CASE(SimpleTransposeConvolution2dFloatNchw,
SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 23d6024530..30770956ba 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -107,6 +107,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefResizeWorkload.hpp
RefRsqrtWorkload.cpp
RefRsqrtWorkload.hpp
+ RefSliceWorkload.cpp
+ RefSliceWorkload.hpp
RefSoftmaxWorkload.cpp
RefSoftmaxWorkload.hpp
RefSpaceToBatchNdWorkload.cpp
@@ -127,6 +129,8 @@ list(APPEND armnnRefBackendWorkloads_sources
Resize.hpp
Rsqrt.cpp
Rsqrt.hpp
+ Slice.cpp
+ Slice.hpp
Softmax.cpp
Softmax.hpp
SpaceToBatchNd.hpp
diff --git a/src/backends/reference/workloads/RefSliceWorkload.cpp b/src/backends/reference/workloads/RefSliceWorkload.cpp
new file mode 100644
index 0000000000..2e448450c1
--- /dev/null
+++ b/src/backends/reference/workloads/RefSliceWorkload.cpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSliceWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Slice.hpp"
+
+#include <Profiling.hpp>
+
+namespace armnn
+{
+
+void RefSliceWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSliceWorkload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+ Slice(inputInfo,
+ m_Data.m_Parameters,
+ m_Data.m_Inputs[0]->Map(),
+ m_Data.m_Outputs[0]->Map(),
+ GetDataTypeSize(inputInfo.GetDataType()));
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefSliceWorkload.hpp b/src/backends/reference/workloads/RefSliceWorkload.hpp
new file mode 100644
index 0000000000..006c7b775d
--- /dev/null
+++ b/src/backends/reference/workloads/RefSliceWorkload.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefSliceWorkload : public BaseWorkload<SliceQueueDescriptor>
+{
+public:
+ using BaseWorkload<SliceQueueDescriptor>::BaseWorkload;
+
+ virtual void Execute() const override;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 1ec349ee22..959226adf6 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -41,10 +41,11 @@
#include "RefPadWorkload.hpp"
#include "RefPreluWorkload.hpp"
#include "RefQuantizeWorkload.hpp"
+#include "RefReshapeWorkload.hpp"
#include "RefResizeBilinearWorkload.hpp"
#include "RefResizeWorkload.hpp"
#include "RefRsqrtWorkload.hpp"
-#include "RefReshapeWorkload.hpp"
+#include "RefSliceWorkload.hpp"
#include "RefSplitterWorkload.hpp"
#include "RefSoftmaxWorkload.hpp"
#include "RefSpaceToBatchNdWorkload.hpp"
@@ -56,4 +57,4 @@
#include "Resize.hpp"
#include "Softmax.hpp"
#include "Splitter.hpp"
-#include "TensorBufferArrayView.hpp" \ No newline at end of file
+#include "TensorBufferArrayView.hpp"
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
new file mode 100644
index 0000000000..c7ca3b156e
--- /dev/null
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -0,0 +1,95 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Slice.hpp"
+
+#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
+
+namespace armnn
+{
+
+void Slice(const TensorInfo& inputInfo,
+ const SliceDescriptor& descriptor,
+ const void* inputData,
+ void* outputData,
+ unsigned int dataTypeSize)
+{
+ const TensorShape& inputShape = inputInfo.GetShape();
+ const unsigned int numDims = inputShape.GetNumDimensions();
+
+ BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
+ BOOST_ASSERT(descriptor.m_Size.size() == numDims);
+
+ constexpr unsigned int maxNumDims = 4;
+ BOOST_ASSERT(numDims <= maxNumDims);
+
+ std::vector<unsigned int> paddedInput(4);
+ std::vector<unsigned int> paddedBegin(4);
+ std::vector<unsigned int> paddedSize (4);
+
+ const unsigned int numPaddingDims = maxNumDims - numDims;
+ for (unsigned int i = 0u; i < maxNumDims; ++i)
+ {
+ if (i < numPaddingDims)
+ {
+ paddedInput[i] = 1u;
+ paddedBegin[i] = 0u;
+ paddedSize[i] = 1u;
+ }
+ else
+ {
+ const unsigned int j = i - numPaddingDims;
+ paddedInput[i] = inputShape[j];
+ paddedBegin[i] = descriptor.m_Begin[j];
+ paddedSize[i] = descriptor.m_Size[j];
+ }
+ }
+
+ unsigned int dim0 = paddedInput[0];
+ unsigned int dim1 = paddedInput[1];
+ unsigned int dim2 = paddedInput[2];
+ unsigned int dim3 = paddedInput[3];
+
+ unsigned int begin0 = paddedBegin[0];
+ unsigned int begin1 = paddedBegin[1];
+ unsigned int begin2 = paddedBegin[2];
+ unsigned int begin3 = paddedBegin[3];
+
+ unsigned int size0 = paddedSize[0];
+ unsigned int size1 = paddedSize[1];
+ unsigned int size2 = paddedSize[2];
+ unsigned int size3 = paddedSize[3];
+
+ BOOST_ASSERT(begin0 + size0 <= dim0);
+ BOOST_ASSERT(begin1 + size1 <= dim1);
+ BOOST_ASSERT(begin2 + size2 <= dim2);
+ BOOST_ASSERT(begin3 + size3 <= dim3);
+
+ const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
+ unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
+
+ boost::ignore_unused(dim0);
+ for (unsigned int idx0 = begin0; idx0 < begin0 + size0; ++idx0)
+ {
+ for (unsigned int idx1 = begin1; idx1 < begin1 + size1; ++idx1)
+ {
+ for (unsigned int idx2 = begin2; idx2 < begin2 + size2; ++idx2)
+ {
+ for (unsigned int idx3 = begin3; idx3 < begin3 + size3; ++idx3)
+ {
+ const unsigned int inputOffset =
+ (((idx0 * dim1 + idx1) * dim2 + idx2) * dim3 + idx3) * dataTypeSize;
+
+ ::memcpy(output, input + inputOffset, dataTypeSize);
+ output += dataTypeSize;
+ }
+ }
+ }
+ }
+}
+
+} // namespace armnn
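The kernel pads inputs of rank < 4 up to 4D with leading dimensions of size 1, then walks the padded index space in row-major order, copying dataTypeSize bytes per element. The sketch below reproduces the same offset arithmetic on plain float buffers (illustrative only, not part of the commit); it differs from the workload above only in copying the contiguous innermost run with a single memcpy instead of element by element:

#include <cstring>
#include <vector>

// Row-major slice copy over a 4D-padded view; mirrors the loop structure above.
std::vector<float> SliceSketch(const std::vector<float>& input,
                               const std::vector<unsigned int>& dims,   // input shape, padded to 4 entries
                               const std::vector<unsigned int>& begin,  // start indices, padded to 4 entries
                               const std::vector<unsigned int>& size)   // element counts, padded to 4 entries
{
    std::vector<float> output(size[0] * size[1] * size[2] * size[3]);
    float* out = output.data();

    for (unsigned int i0 = begin[0]; i0 < begin[0] + size[0]; ++i0)
    {
        for (unsigned int i1 = begin[1]; i1 < begin[1] + size[1]; ++i1)
        {
            for (unsigned int i2 = begin[2]; i2 < begin[2] + size[2]; ++i2)
            {
                // Elements along the innermost dimension are contiguous,
                // so one memcpy covers the size[3] elements of this run.
                const unsigned int offset =
                    ((i0 * dims[1] + i1) * dims[2] + i2) * dims[3] + begin[3];
                std::memcpy(out, input.data() + offset, size[3] * sizeof(float));
                out += size[3];
            }
        }
    }
    return output;
}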
diff --git a/src/backends/reference/workloads/Slice.hpp b/src/backends/reference/workloads/Slice.hpp
new file mode 100644
index 0000000000..823f16c052
--- /dev/null
+++ b/src/backends/reference/workloads/Slice.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn
+{
+
+void Slice(const TensorInfo& inputInfo,
+ const SliceDescriptor& descriptor,
+ const void* inputData,
+ void* outputData,
+ unsigned int dataTypeSize);
+
+} // namespace armnn