author     Matthew Jackson <matthew.jackson@arm.com>      2019-07-31 14:06:28 +0100
committer  Matteo Martincigh <matteo.martincigh@arm.com>  2019-08-01 09:42:03 +0000
commit     d5166106ebfc11b1db832a708892e12edeaf997e (patch)
tree       8f4be789711ed50b0bddf50a813caf4b823a59e9
parent     2208b604dea6919525a12779df9aa5e49ea6a539 (diff)
IVGCVSW-3539 Add CL support and tests for Stack
* Added CL backend support for Stack
* Added unit tests for Stack on the CL backend
* Refactored unit tests to support generic data types

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: I38ee3e9d8947ea98a3104c982698001e704d7d89
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                        |  9
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp          |  2
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp                       | 13
-rw-r--r--  src/backends/cl/ClLayerSupport.hpp                       |  5
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp                    |  6
-rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp                    |  3
-rw-r--r--  src/backends/cl/backend.mk                               |  1
-rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp           | 38
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp                    |  7
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt                 |  2
-rw-r--r--  src/backends/cl/workloads/ClStackWorkload.cpp            | 74
-rw-r--r--  src/backends/cl/workloads/ClStackWorkload.hpp            | 29
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp                |  1
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp   | 36
14 files changed, 204 insertions(+), 22 deletions(-)
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 619c6d02cd..98cdfaff0e 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -1381,17 +1381,16 @@ std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWor
return workload;
}
-template <typename StackWorkload>
+template <typename StackWorkload, armnn::DataType DataType>
std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph,
const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputShape,
unsigned int axis,
- unsigned int numInputs,
- armnn::DataType dataType)
+ unsigned int numInputs)
{
- armnn::TensorInfo inputTensorInfo(inputShape, dataType);
- armnn::TensorInfo outputTensorInfo(outputShape, dataType);
+ armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
// Constructs the Stack layer.
armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
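The helper above now takes the data type as a template parameter rather than a trailing runtime argument, so backend test suites can instantiate it generically per data type. For example, mirroring the CL test added later in this patch, a call now looks roughly like:

    auto workload = CreateStackWorkloadTest<armnn::ClStackWorkload, armnn::DataType::Float32>(
        factory, graph,
        armnn::TensorShape({ 3, 4, 5 }),     // input shape
        armnn::TensorShape({ 3, 4, 2, 5 }),  // output shape
        /*axis=*/2, /*numInputs=*/2);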
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6d03da74fc..bd7f1c627b 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -193,7 +193,7 @@ public:
const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const;
+ const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const;
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index b737daf7f4..05539623a5 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -46,6 +46,7 @@
#include "workloads/ClSpaceToBatchNdWorkload.hpp"
#include "workloads/ClSpaceToDepthWorkload.hpp"
#include "workloads/ClSplitterWorkload.hpp"
+#include "workloads/ClStackWorkload.hpp"
#include "workloads/ClStridedSliceWorkload.hpp"
#include "workloads/ClSubtractionWorkload.hpp"
#include "workloads/ClTransposeConvolution2dWorkload.hpp"
@@ -671,6 +672,18 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
return true;
}
+bool ClLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClStackWorkloadValidate,
+ reasonIfUnsupported,
+ inputs,
+ output,
+ descriptor);
+}
+
bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 63a4daf864..4879e8b4b8 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -218,6 +218,11 @@ public:
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsStackSupported(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsStridedSliceSupported(const TensorInfo& input,
const TensorInfo& output,
const StridedSliceDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 6ce87d872e..4a593aac63 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -433,4 +433,10 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSpaceToDepth(const SpaceToDe
return MakeWorkload<ClSpaceToDepthWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return MakeWorkload<ClStackWorkload>(descriptor, info);
+}
+
} // namespace armnn
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 3b0ac826d7..8586435481 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -182,6 +182,9 @@ public:
std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateStack(const StackQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
private:
template<typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
static std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorType& descriptor,
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 57d7cb9f03..ee6447f340 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -55,6 +55,7 @@ BACKEND_SOURCES := \
workloads/ClSpaceToBatchNdWorkload.cpp \
workloads/ClSpaceToDepthWorkload.cpp \
workloads/ClSplitterWorkload.cpp \
+ workloads/ClStackWorkload.cpp \
workloads/ClStridedSliceWorkload.cpp \
workloads/ClSubtractionWorkload.cpp \
workloads/ClTransposeConvolution2dWorkload.cpp
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index de13390f87..f453ccc9fd 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -936,4 +936,42 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
}
+template <armnn::DataType DataType>
+static void ClCreateStackWorkloadTest(const std::initializer_list<unsigned int>& inputShape,
+ const std::initializer_list<unsigned int>& outputShape,
+ unsigned int axis,
+ unsigned int numInputs)
+{
+ armnn::Graph graph;
+ ClWorkloadFactory factory =
+ ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
+
+ auto workload = CreateStackWorkloadTest<ClStackWorkload, DataType>(factory,
+ graph,
+ TensorShape(inputShape),
+ TensorShape(outputShape),
+ axis,
+ numInputs);
+
+ // Check inputs and output are as expected
+ StackQueueDescriptor queueDescriptor = workload->GetData();
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+ }
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+}
+
+BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+{
+ ClCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+}
+
+BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+{
+ ClCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 37af471658..dd4c16edf4 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -496,6 +496,13 @@ ARMNN_AUTO_TEST_CASE(SpaceToDepthNCHW2Float32, SpaceToDepthNCHWFloat32Test2)
ARMNN_AUTO_TEST_CASE(SpaceToDepthNHWCQSymm16, SpaceToDepthNHWCQSymm16Test)
ARMNN_AUTO_TEST_CASE(SpaceToDepthNCHWQSymm16, SpaceToDepthNCHWQSymm16Test)
+// Stack
+ARMNN_AUTO_TEST_CASE(Stack0Axis, Stack0AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack4dOutput1Axis, Stack4dOutput1AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack4dOutput2Axis, Stack4dOutput2AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack4dOutput3Axis, Stack4dOutput3AxisTest<armnn::DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Stack3dOutput1Axis3Input, Stack3dOutput1Axis3InputTest<armnn::DataType::Float32>)
+
// Strided Slice
ARMNN_AUTO_TEST_CASE(StridedSlice4DFloat32, StridedSlice4DFloat32Test)
ARMNN_AUTO_TEST_CASE(StridedSlice4DReverseFloat32, StridedSlice4DReverseFloat32Test)
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 2a3b1ad6b6..49a8b177d0 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -74,6 +74,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClSpaceToDepthWorkload.hpp
ClSplitterWorkload.cpp
ClSplitterWorkload.hpp
+ ClStackWorkload.cpp
+ ClStackWorkload.hpp
ClStridedSliceWorkload.cpp
ClStridedSliceWorkload.hpp
ClSubtractionWorkload.cpp
diff --git a/src/backends/cl/workloads/ClStackWorkload.cpp b/src/backends/cl/workloads/ClStackWorkload.cpp
new file mode 100644
index 0000000000..3ba698ec4d
--- /dev/null
+++ b/src/backends/cl/workloads/ClStackWorkload.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ClStackWorkload.hpp"
+#include "ClWorkloadUtils.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <cl/ClTensorHandle.hpp>
+#include <cl/ClLayerSupport.hpp>
+
+#include <arm_compute/core/Types.h>
+
+#include <boost/numeric/conversion/cast.hpp>
+#include <boost/polymorphic_pointer_cast.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+namespace
+{
+int CalcAxis(const unsigned int axis, const unsigned int inputDimensions)
+{
+ const int intAxis = boost::numeric_cast<int>(axis);
+ return boost::numeric_cast<int>(inputDimensions) - intAxis;
+}
+} //namespace
+
+arm_compute::Status ClStackWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor)
+{
+ std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
+ arm_compute::TensorInfo aclInputInfo;
+ for (const TensorInfo* input : inputs)
+ {
+ aclInputInfo = BuildArmComputeTensorInfo(*input);
+ aclInputPtrs.emplace_back(&aclInputInfo);
+ }
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ int aclAxis = CalcAxis(descriptor.m_Axis, descriptor.m_InputShape.GetNumDimensions());
+
+ return arm_compute::CLStackLayer::validate(aclInputPtrs, aclAxis, &aclOutputInfo);
+}
+
+ClStackWorkload::ClStackWorkload(const StackQueueDescriptor& descriptor, const WorkloadInfo& info)
+: BaseWorkload<StackQueueDescriptor>(descriptor, info)
+{
+ std::vector<arm_compute::ICLTensor*> aclInputs;
+ for (auto input : m_Data.m_Inputs)
+ {
+ arm_compute::ICLTensor& aclInput = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
+ aclInputs.emplace_back(&aclInput);
+ }
+ arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+ m_Data.m_Outputs[0])->GetTensor();
+
+ m_Layer.reset(new arm_compute::CLStackLayer());
+ int aclAxis = CalcAxis(descriptor.m_Parameters.m_Axis, descriptor.m_Parameters.m_InputShape.GetNumDimensions());
+ m_Layer->configure(aclInputs, aclAxis, &output);
+}
+
+void ClStackWorkload::Execute() const
+{
+ if (m_Layer)
+ {
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClStackWorkload_Execute");
+ m_Layer->run();
+ }
+}
+
+} //namespace armnn
\ No newline at end of file
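A note on the axis handling above: CalcAxis maps the Arm NN stack axis onto the axis index arm_compute::CLStackLayer expects by subtracting it from the number of input dimensions, reflecting the reversed dimension ordering used by ACL. A worked example using the shapes from the unit tests (illustrative values only):

    // 3-D inputs { 3, 4, 5 } stacked on Arm NN axis 2:
    // aclAxis = inputDimensions - axis = 3 - 2 = 1
    int aclAxis = CalcAxis(/*axis=*/2, /*inputDimensions=*/3);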
diff --git a/src/backends/cl/workloads/ClStackWorkload.hpp b/src/backends/cl/workloads/ClStackWorkload.hpp
new file mode 100644
index 0000000000..75008697a3
--- /dev/null
+++ b/src/backends/cl/workloads/ClStackWorkload.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+namespace armnn
+{
+arm_compute::Status ClStackWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+ const TensorInfo& output,
+ const StackDescriptor& descriptor);
+
+class ClStackWorkload : public BaseWorkload<StackQueueDescriptor>
+{
+public:
+ ClStackWorkload(const StackQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ void Execute() const override;
+
+private:
+ mutable std::unique_ptr<arm_compute::CLStackLayer> m_Layer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index a64dea27e7..03dffc4edc 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -36,6 +36,7 @@
#include "ClSpaceToBatchNdWorkload.hpp"
#include "ClSpaceToDepthWorkload.hpp"
#include "ClSplitterWorkload.hpp"
+#include "ClStackWorkload.hpp"
#include "ClStridedSliceWorkload.hpp"
#include "ClSubtractionWorkload.hpp"
#include "ClConvertFp16ToFp32Workload.hpp"
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index f7999d0ffe..04c9acbe39 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -990,41 +990,45 @@ BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
}
+template <armnn::DataType DataType>
static void RefCreateStackWorkloadTest(const armnn::TensorShape& inputShape,
const armnn::TensorShape& outputShape,
unsigned int axis,
- unsigned int numInputs,
- armnn::DataType dataType)
+ unsigned int numInputs)
{
armnn::Graph graph;
RefWorkloadFactory factory;
- auto workload = CreateStackWorkloadTest<RefStackWorkload>(factory,
- graph,
- inputShape,
- outputShape,
- axis,
- numInputs,
- dataType);
-
- // Check output is as expected
- auto queueDescriptor = workload->GetData();
+ auto workload = CreateStackWorkloadTest<RefStackWorkload, DataType>(factory,
+ graph,
+ inputShape,
+ outputShape,
+ axis,
+ numInputs);
+
+ // Check inputs and output are as expected
+ StackQueueDescriptor queueDescriptor = workload->GetData();
+ for (unsigned int i = 0; i < numInputs; ++i)
+ {
+ auto inputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
+ BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
+ }
auto outputHandle = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
+ BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
}
BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
{
- RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2, armnn::DataType::Float32);
+ RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
{
- RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2, armnn::DataType::QuantisedAsymm8);
+ RefCreateStackWorkloadTest<armnn::DataType::QuantisedAsymm8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
{
- RefCreateStackWorkloadTest({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2, armnn::DataType::QuantisedSymm16);
+ RefCreateStackWorkloadTest<armnn::DataType::QuantisedSymm16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
}
BOOST_AUTO_TEST_SUITE_END()