about summary refs log tree commit diff
path: root/src/backends/cl
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/cl')
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp        | 8
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp     | 4
-rw-r--r--  src/backends/cl/backend.mk                | 1
-rw-r--r--  src/backends/cl/test/ClLayerTests.cpp     | 8
-rw-r--r--  src/backends/cl/workloads/CMakeLists.txt  | 2
-rw-r--r--  src/backends/cl/workloads/ClNegWorkload.cpp | 44
-rw-r--r--  src/backends/cl/workloads/ClNegWorkload.hpp | 28
-rw-r--r--  src/backends/cl/workloads/ClWorkloads.hpp | 1
8 files changed, 96 insertions(+), 0 deletions(-)
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cdb93d7218..7f7554ab54 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -40,6 +40,7 @@
#include "workloads/ClConcatWorkload.hpp"
#include "workloads/ClMinimumWorkload.hpp"
#include "workloads/ClMultiplicationWorkload.hpp"
+#include "workloads/ClNegWorkload.hpp"
#include "workloads/ClNormalizationFloatWorkload.hpp"
#include "workloads/ClPadWorkload.hpp"
#include "workloads/ClPermuteWorkload.hpp"
@@ -415,6 +416,13 @@ bool ClLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
input,
output);
}
+ else if (descriptor.m_Operation == UnaryOperation::Neg)
+ {
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClNegWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
+ }
return false;
}
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4f707beebe..ead0bc36a4 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -272,6 +272,10 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const Eleme
return MakeWorkload<ClRsqrtWorkload>(rsqrtQueueDescriptor, info);
}
+ else if (descriptor.m_Parameters.m_Operation == UnaryOperation::Neg)
+ {
+ return MakeWorkload<ClNegWorkload>(descriptor, info);
+ }
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index e326add9e9..c8da9b714b 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -47,6 +47,7 @@ BACKEND_SOURCES := \
workloads/ClMeanWorkload.cpp \
workloads/ClMinimumWorkload.cpp \
workloads/ClMultiplicationWorkload.cpp \
+ workloads/ClNegWorkload.cpp \
workloads/ClNormalizationFloatWorkload.cpp \
workloads/ClPadWorkload.cpp \
workloads/ClPermuteWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index a79f1ca1ce..df80da215e 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -949,6 +949,14 @@ ARMNN_AUTO_TEST_CASE(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>
ARMNN_AUTO_TEST_CASE(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
ARMNN_AUTO_TEST_CASE(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
+// Neg
+ARMNN_AUTO_TEST_CASE(Neg2d, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg3d, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegZero, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(NegNegative, NegNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(Neg2dFloat16, Neg2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 17d69b1ed5..3f964eb1a6 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -54,6 +54,8 @@ list(APPEND armnnClBackendWorkloads_sources
ClMinimumWorkload.hpp
ClMultiplicationWorkload.cpp
ClMultiplicationWorkload.hpp
+ ClNegWorkload.cpp
+ ClNegWorkload.hpp
ClNormalizationFloatWorkload.cpp
ClNormalizationFloatWorkload.hpp
ClPadWorkload.cpp
diff --git a/src/backends/cl/workloads/ClNegWorkload.cpp b/src/backends/cl/workloads/ClNegWorkload.cpp
new file mode 100644
index 0000000000..cc6333fff9
--- /dev/null
+++ b/src/backends/cl/workloads/ClNegWorkload.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClNegWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status ClNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ return arm_compute::CLNegLayer::validate(&aclInput, &aclOutput);
+}
+
+ClNegWorkload::ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("ClNegWorkload", 1, 1);
+
+ arm_compute::ICLTensor& input = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = boost::polymorphic_downcast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_NegLayer.configure(&input, &output);
+}
+
+void ClNegWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClNegWorkload_Execute");
+ RunClFunction(m_NegLayer, CHECK_LOCATION());
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClNegWorkload.hpp b/src/backends/cl/workloads/ClNegWorkload.hpp
new file mode 100644
index 0000000000..9dbfa07665
--- /dev/null
+++ b/src/backends/cl/workloads/ClNegWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClNegWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class ClNegWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+ ClNegWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::CLNegLayer m_NegLayer;
+};
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index ec193d5e3e..c7c016379e 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -27,6 +27,7 @@
#include "ClMeanWorkload.hpp"
#include "ClMinimumWorkload.hpp"
#include "ClMultiplicationWorkload.hpp"
+#include "ClNegWorkload.hpp"
#include "ClNormalizationFloatWorkload.hpp"
#include "ClPermuteWorkload.hpp"
#include "ClPadWorkload.hpp"