Diffstat (limited to 'src/backends/cl')
 -rw-r--r--  src/backends/cl/ClLayerSupport.cpp              | 32
 -rw-r--r--  src/backends/cl/ClLayerSupport.hpp              |  7
 -rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp           | 36
 -rw-r--r--  src/backends/cl/ClWorkloadFactory.hpp           |  5
 -rw-r--r--  src/backends/cl/test/ClCreateWorkloadTests.cpp  | 14
 -rw-r--r--  src/backends/cl/test/ClEndToEndTests.cpp        | 12
 6 files changed, 88 insertions(+), 18 deletions(-)
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index f7129d6035..f8cc5074b3 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -160,10 +160,8 @@ bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
- reasonIfUnsupported,
- input,
- output);
+ ElementwiseUnaryDescriptor descriptor(UnaryOperation::Abs);
+ return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}
bool ClLayerSupport::IsActivationSupported(const TensorInfo& input,
@@ -425,6 +423,29 @@ bool ClLayerSupport::IsDivisionSupported(const TensorInfo& input0,
output);
}
+bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ if (descriptor.m_Operation == UnaryOperation::Abs)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClAbsWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
+ else if (descriptor.m_Operation == UnaryOperation::Rsqrt)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
+
+ return false;
+}
+
bool ClLayerSupport::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
@@ -685,7 +706,8 @@ bool ClLayerSupport::IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- FORWARD_WORKLOAD_VALIDATE_FUNC(ClRsqrtWorkloadValidate, reasonIfUnsupported, input, output);
+ ElementwiseUnaryDescriptor descriptor(UnaryOperation::Rsqrt);
+ return IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported);
}
bool ClLayerSupport::IsSliceSupported(const TensorInfo& input,
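
The ClLayerSupport.cpp hunks above funnel the deprecated Abs and Rsqrt checks through the single IsElementwiseUnarySupported entry point. A minimal caller-side sketch of the new query, assuming the in-tree include layout; the helper name, tensor arguments and reason handling are illustrative, not part of this patch:

#include <armnn/Descriptors.hpp>   // ElementwiseUnaryDescriptor, UnaryOperation
#include <armnn/Optional.hpp>
#include <armnn/Tensor.hpp>
#include <string>
#include "ClLayerSupport.hpp"      // src/backends/cl

// Hypothetical helper: ask the CL backend whether Abs is supported for the given tensors.
bool IsAbsSupportedOnCl(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    armnn::ClLayerSupport layerSupport;
    std::string reason;
    armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Abs);
    return layerSupport.IsElementwiseUnarySupported(
        input, output, descriptor, armnn::Optional<std::string&>(reason));
}
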
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index a21589d555..9371717013 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -12,6 +12,7 @@ namespace armnn
class ClLayerSupport : public LayerSupportBase
{
public:
+ ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
bool IsAbsSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -102,6 +103,11 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsElementwiseUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
@@ -223,6 +229,7 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ ARMNN_DEPRECATED_MSG("Use IsElementwiseUnarySupported instead")
bool IsRsqrtSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
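
The header keeps the old per-operation overrides but tags them with ARMNN_DEPRECATED_MSG, so existing callers still compile and merely get a warning pointing at the unified method. As a rough sketch of the mechanism (the macro in armnn's Deprecated.hpp typically wraps the standard C++14 attribute; treat the expansion below as an approximation, not a quote of this patch):

// Approximate expansion of the annotation used above:
// #define ARMNN_DEPRECATED_MSG(message) [[deprecated(message)]]
[[deprecated("Use IsElementwiseUnarySupported instead")]]
bool IsAbsSupported(const armnn::TensorInfo& input,
                    const armnn::TensorInfo& output,
                    armnn::Optional<std::string&> reasonIfUnsupported) const;
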
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index f9e6632b0c..4bb2e2a8ce 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -131,7 +131,12 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<ClAbsWorkload>(descriptor, info);
+ boost::ignore_unused(descriptor);
+
+ ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+ elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
+
+ return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
@@ -249,6 +254,28 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateDivision(const DivisionQueue
return MakeWorkload<ClDivisionFloatWorkload, NullWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ if (descriptor.m_Parameters.m_Operation == UnaryOperation::Abs)
+ {
+ AbsQueueDescriptor absQueueDescriptor;
+ absQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+ absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ return MakeWorkload<ClAbsWorkload>(absQueueDescriptor, info);
+ }
+ else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Rsqrt)
+ {
+ RsqrtQueueDescriptor rsqrtQueueDescriptor;
+ rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+ rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+
+ return MakeWorkload<ClRsqrtWorkload>(rsqrtQueueDescriptor, info);
+ }
+ return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -450,7 +477,12 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeB
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<ClRsqrtWorkload>(descriptor, info);
+ boost::ignore_unused(descriptor);
+
+ ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
+ elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
+
+ return CreateElementwiseUnary(elementwiseUnaryDescriptor, info);
}
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateSlice(const SliceQueueDescriptor& descriptor,
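
The factory changes mirror the layer-support ones: the deprecated CreateAbs and CreateRsqrt now build an ElementwiseUnaryQueueDescriptor and defer to CreateElementwiseUnary, which dispatches back to the existing ClAbsWorkload and ClRsqrtWorkload. A sketch of calling the new entry point directly; the handles, WorkloadInfo and include paths are assumptions about the caller's setup, not code from this patch:

#include <armnn/Descriptors.hpp>
#include <backendsCommon/WorkloadData.hpp>   // ElementwiseUnaryQueueDescriptor (in-tree path)
#include <memory>
#include "ClWorkloadFactory.hpp"             // src/backends/cl

// Hypothetical helper showing the unified factory path for Rsqrt.
std::unique_ptr<armnn::IWorkload> MakeRsqrtWorkload(armnn::ClWorkloadFactory& factory,
                                                    armnn::ITensorHandle* inputHandle,
                                                    armnn::ITensorHandle* outputHandle,
                                                    const armnn::WorkloadInfo& workloadInfo)
{
    armnn::ElementwiseUnaryQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = armnn::ElementwiseUnaryDescriptor(armnn::UnaryOperation::Rsqrt);
    queueDescriptor.m_Inputs.push_back(inputHandle);     // single input tensor handle
    queueDescriptor.m_Outputs.push_back(outputHandle);   // single output tensor handle

    return factory.CreateElementwiseUnary(queueDescriptor, workloadInfo);
}
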
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 8f377e959d..980be9192e 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -38,6 +38,7 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged = true) const override;
+ ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
std::unique_ptr<IWorkload> CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -92,6 +93,9 @@ public:
std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -178,6 +182,7 @@ public:
std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ ARMNN_DEPRECATED_MSG("Use CreateElementwiseUnary instead")
std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index d79745c420..92e771760f 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -146,18 +146,16 @@ BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
armnn::DataType::Float16>();
}
-template <typename WorkloadType,
+template <typename WorkloadType,
typename DescriptorType,
- typename LayerType,
armnn::DataType DataType>
-static void ClCreateElementwiseUnaryWorkloadTest()
+static void ClCreateElementwiseUnaryWorkloadTest(armnn::UnaryOperation op)
{
Graph graph;
ClWorkloadFactory factory =
ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
- auto workload = CreateElementwiseUnaryWorkloadTest
- <WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
+ auto workload = CreateElementwiseUnaryWorkloadTest<WorkloadType, DescriptorType, DataType>(factory, graph, op);
DescriptorType queueDescriptor = workload->GetData();
@@ -170,10 +168,8 @@ static void ClCreateElementwiseUnaryWorkloadTest()
BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
{
- ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload,
- RsqrtQueueDescriptor,
- RsqrtLayer,
- armnn::DataType::Float32>();
+ ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, RsqrtQueueDescriptor, armnn::DataType::Float32>(
+ UnaryOperation::Rsqrt);
}
template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
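
Only the Rsqrt case is rewired to the new template signature in this hunk; a matching Abs case would follow the same pattern. Hypothetical companion test (not part of this diff):

BOOST_AUTO_TEST_CASE(CreateAbsFloat32WorkloadTest)
{
    ClCreateElementwiseUnaryWorkloadTest<ClAbsWorkload, AbsQueueDescriptor, armnn::DataType::Float32>(
        UnaryOperation::Abs);
}
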
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index 260f8f68cd..eafdb7c3e5 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -5,12 +5,12 @@
#include <backendsCommon/test/EndToEndTestImpl.hpp>
-#include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
#include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
#include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
#include <backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp>
#include <backendsCommon/test/InstanceNormalizationEndToEndTestImpl.hpp>
#include <backendsCommon/test/PreluEndToEndTestImpl.hpp>
#include <backendsCommon/test/QuantizedLstmEndToEndTestImpl.hpp>
@@ -27,7 +27,15 @@ std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc};
// Abs
BOOST_AUTO_TEST_CASE(ClAbsEndToEndTestFloat32)
{
- AbsEndToEnd<armnn::DataType::Float32>(defaultBackends);
+ std::vector<float> expectedOutput =
+ {
+ 1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+ 3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+ };
+
+ ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+ UnaryOperation::Abs,
+ expectedOutput);
}
// Constant