aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichele Di Giorgio <michele.digiorgio@arm.com>2021-01-25 11:49:03 +0000
committerMichele Di Giorgio <michele.digiorgio@arm.com>2021-01-26 18:23:41 +0000
commit4cfab1886549d7582e3867c51278132e5d37681b (patch)
treee3d8e502a360581c422787c7452acad77fa1824f
parent1cf1a7f046f3ff9518cdad21b1f0c164e46345f7 (diff)
downloadComputeLibrary-4cfab1886549d7582e3867c51278132e5d37681b.tar.gz
Make CLArithmeticSubtraction kernel and function state-less
Resolves COMPMID-4008 Change-Id: Ic5f40610e771f31e6d301dfae976c81e9c79fa8b Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4917 Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--Android.bp1
-rw-r--r--arm_compute/runtime/CL/functions/CLElementwiseOperations.h67
-rw-r--r--src/core/gpu/cl/kernels/ClActivationKernel.h4
-rw-r--r--src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h4
-rw-r--r--src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h4
-rw-r--r--src/core/gpu/cl/kernels/ClFloorKernel.h4
-rw-r--r--src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h4
-rw-r--r--src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h6
-rw-r--r--src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h10
-rw-r--r--src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h4
-rw-r--r--src/runtime/CL/functions/CLElementwiseOperations.cpp35
-rw-r--r--src/runtime/gpu/cl/operators/ClAdd.h6
-rw-r--r--src/runtime/gpu/cl/operators/ClConcatenate.h4
-rw-r--r--src/runtime/gpu/cl/operators/ClSub.cpp47
-rw-r--r--src/runtime/gpu/cl/operators/ClSub.h100
15 files changed, 180 insertions, 120 deletions
diff --git a/Android.bp b/Android.bp
index 6937ab5ea6..7cfa817193 100644
--- a/Android.bp
+++ b/Android.bp
@@ -798,6 +798,7 @@ cc_library_static {
"src/runtime/gpu/cl/operators/ClAdd.cpp",
"src/runtime/gpu/cl/operators/ClConcatenate.cpp",
"src/runtime/gpu/cl/operators/ClFloor.cpp",
+ "src/runtime/gpu/cl/operators/ClSub.cpp",
"utils/CommonGraphOptions.cpp",
"utils/GraphUtils.cpp",
"utils/Utils.cpp",
diff --git a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
index 4dd491221e..c8c7e0c587 100644
--- a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
+++ b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
@@ -35,73 +35,6 @@ class ITensorInfo;
namespace experimental
{
-/** Basic function to run @ref arm_compute::opencl::kernels::ClSaturatedArithmeticKernel for subtraction
- *
- * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/S32/F16/F32.
- * @note The function performs an arithmetic subtraction between two tensors.
- */
-class CLArithmeticSubtraction : public ICLOperator
-{
-public:
- /** Default Constructor */
- CLArithmeticSubtraction();
- /** Initialise the kernel's inputs, output and conversion policy.
- *
- * Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (S16,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16
- *
- * @param[in] compile_context The compile context to be used.
- * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
- * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[in, out] input2 Second tensor input. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
- * The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[out] output Output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
- * @param[in] policy Policy to use to handle overflow.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- */
- void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
- const ActivationLayerInfo &act_info = ActivationLayerInfo());
- /** Static function to check if given info will lead to a valid configuration of @ref arm_compute::opencl::kernels::ClSaturatedArithmeticKernel for subtraction
- *
- * Valid configurations (Input1,Input2) -> Output :
- *
- * - (U8,U8) -> U8
- * - (U8,U8) -> S16
- * - (S16,U8) -> S16
- * - (U8,S16) -> S16
- * - (S16,S16) -> S16
- * - (S32,S32) -> S32
- * - (F16,F16) -> F16
- * - (F32,F32) -> F32
- * - (QASYMM8,QASYMM8) -> QASYMM8
- * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
- * - (QSYMM16,QSYMM16) -> QSYMM16
- *
- * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
- * @param[in] output Output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
- * @param[in] policy Policy to use to handle overflow.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
- *
- * @return a status
- */
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- void run(ITensorPack &tensors) override;
-};
-
/** Basic function to run @ref arm_compute::opencl::kernels::ClSaturatedArithmeticKernel for division
*
* @note The tensor data type for the inputs must be F16/F32.
diff --git a/src/core/gpu/cl/kernels/ClActivationKernel.h b/src/core/gpu/cl/kernels/ClActivationKernel.h
index 30adc552b3..68c309e9e7 100644
--- a/src/core/gpu/cl/kernels/ClActivationKernel.h
+++ b/src/core/gpu/cl/kernels/ClActivationKernel.h
@@ -45,9 +45,9 @@ public:
* @note If the output tensor is a nullptr, the activation function will be performed in-place
*
* @param[in] compile_context The compile context to be used.
- * @param[in, out] src Source tensor. In case of @p dst tensor = nullptr, this tensor will store the result
+ * @param[in, out] src Source tensor info. In case of @p dst tensor = nullptr, this tensor will store the result
* of the activation function. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
- * @param[out] dst Destination tensor. Data type supported: same as @p src
+ * @param[out] dst Destination tensor info. Data type supported: same as @p src
* @param[in] act_info Activation layer information.
*/
void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo act_info);
diff --git a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
index 378a08aa4f..d9fa905e8e 100644
--- a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
@@ -46,9 +46,9 @@ public:
/** Initialise the kernel's source and destination
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data types supported: All.
+ * @param[in] src Source tensor info. Data types supported: All.
* @param[in] batch_offset The offset on axis # 3.
- * @param[in,out] dst Destination tensor. Data types supported: Same as @p src.
+ * @param[in,out] dst Destination tensor info. Data types supported: Same as @p src.
*
* @note: The dst tensor's low two dimensions can't be smaller than the src one's.
* @note: The gaps between the two lowest dimensions of src and dst need to be divisible by 2.
diff --git a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
index 144d7d48f2..5acfb33199 100644
--- a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
@@ -46,9 +46,9 @@ public:
/** Initialise the kernel's source and destination
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] src Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] depth_offset The offset on the Z axis.
- * @param[in,out] dst Destination tensor. Data types supported: Same as @p src.
+ * @param[in,out] dst Destination tensor info. Data types supported: Same as @p src.
*
* @note: The dst tensor's low two dimensions can't be smaller than the src one's.
* @note: The gaps between the two lowest dimensions of src and dst need to be divisible by 2.
diff --git a/src/core/gpu/cl/kernels/ClFloorKernel.h b/src/core/gpu/cl/kernels/ClFloorKernel.h
index 09ab801f2d..646dfb30d8 100644
--- a/src/core/gpu/cl/kernels/ClFloorKernel.h
+++ b/src/core/gpu/cl/kernels/ClFloorKernel.h
@@ -43,8 +43,8 @@ public:
/** Configure kernel for a given list of arguments
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data type supported: F16/F32.
- * @param[out] dst Destination tensor. Same as @p src
+ * @param[in] src Source tensor info. Data type supported: F16/F32.
+ * @param[out] dst Destination tensor info. Same as @p src
*/
void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
diff --git a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
index 88cd4c4d17..9a4380a5b7 100644
--- a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
@@ -46,9 +46,9 @@ public:
/** Initialise the kernel's source and destination
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data types supported: All.
+ * @param[in] src Source tensor info. Data types supported: All.
* @param[in] height_offset The starting offset on the Y axis for the dst tensor.
- * @param[out] dst Destination tensor. Data types supported: same as @p src.
+ * @param[out] dst Destination tensor info. Data types supported: same as @p src.
*
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst);
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
index 92715008cf..ddade29113 100644
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
+++ b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
@@ -46,9 +46,9 @@ public:
/** Initialise the kernel's sources and destination
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src1 First source tensor. Data types supported: All.
- * @param[in] src2 Second source tensor. Data types supported: same as @p src1
- * @param[out] dst Destination tensor. Data types supported: Same as @p src1.
+ * @param[in] src1 First source tensor info. Data types supported: All.
+ * @param[in] src2 Second source tensor info. Data types supported: same as @p src1
+ * @param[out] dst Destination tensor info. Data types supported: Same as @p src1.
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
/** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate2TensorsKernel
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
index 06d6c0399a..19bda65902 100644
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
+++ b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
@@ -47,11 +47,11 @@ public:
/** Initialise the kernel's sources and destination
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src1 First source tensor. Data types supported: All.
- * @param[in] src2 Second source tensor. Data types supported: same as @p src1
- * @param[in] src3 Third source tensor. Data types supported: same as @p src1
- * @param[in] src4 Fourth source tensor. Data types supported: same as @p src1
- * @param[out] dst Destination tensor. Data types supported: same as @p src1.
+ * @param[in] src1 First source tensor info. Data types supported: All.
+ * @param[in] src2 Second source tensor info. Data types supported: same as @p src1
+ * @param[in] src3 Third source tensor info. Data types supported: same as @p src1
+ * @param[in] src4 Fourth source tensor info. Data types supported: same as @p src1
+ * @param[out] dst Destination tensor info. Data types supported: same as @p src1.
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *src3, ITensorInfo *src4, ITensorInfo *dst);
/** Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate4TensorsKernel
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
index 3bffe52700..6bc8e57a08 100644
--- a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
@@ -46,9 +46,9 @@ public:
/** Initialise the kernel's source and destination
*
* @param[in] compile_context The compile context to be used.
- * @param[in] src Source tensor. Data types supported: All.
+ * @param[in] src Source tensor info. Data types supported: All.
* @param[in] width_offset The offset on the X axis.
- * @param[in,out] dst Destination tensor. Data types supported: same as @p src.
+ * @param[in,out] dst Destination tensor info. Data types supported: same as @p src.
*
*/
void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst);
diff --git a/src/runtime/CL/functions/CLElementwiseOperations.cpp b/src/runtime/CL/functions/CLElementwiseOperations.cpp
index 638990e472..9b809eebc7 100644
--- a/src/runtime/CL/functions/CLElementwiseOperations.cpp
+++ b/src/runtime/CL/functions/CLElementwiseOperations.cpp
@@ -28,6 +28,7 @@
#include "src/core/gpu/cl/kernels/ClElementwiseKernel.h"
#include "src/runtime/gpu/cl/operators/ClAdd.h"
+#include "src/runtime/gpu/cl/operators/ClSub.h"
#include <utility>
@@ -35,28 +36,6 @@ namespace arm_compute
{
namespace experimental
{
-CLArithmeticSubtraction::CLArithmeticSubtraction()
-{
-}
-void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
- const ActivationLayerInfo &act_info)
-{
- auto k = std::make_unique<arm_compute::opencl::kernels::ClSaturatedArithmeticKernel>();
- k->configure(compile_context, ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
- _kernel = std::move(k);
-}
-
-Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(policy);
- return arm_compute::opencl::kernels::ClSaturatedArithmeticKernel::validate(ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
-}
-
-void CLArithmeticSubtraction::run(ITensorPack &tensors)
-{
- ICLOperator::run(tensors);
-}
-
CLArithmeticDivision::CLArithmeticDivision()
{
}
@@ -210,10 +189,10 @@ void CLArithmeticAddition::run()
struct CLArithmeticSubtraction::Impl
{
- const ICLTensor *src_0{ nullptr };
- const ICLTensor *src_1{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLArithmeticSubtraction> op{ nullptr };
+ const ICLTensor *src_0{ nullptr };
+ const ICLTensor *src_1{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<opencl::ClSub> op{ nullptr };
};
CLArithmeticSubtraction::CLArithmeticSubtraction()
@@ -235,13 +214,13 @@ void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context,
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
- _impl->op = std::make_unique<experimental::CLArithmeticSubtraction>();
+ _impl->op = std::make_unique<opencl::ClSub>();
_impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), policy, act_info);
}
Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
{
- return experimental::CLArithmeticSubtraction::validate(input1, input2, output, policy, act_info);
+ return opencl::ClSub::validate(input1, input2, output, policy, act_info);
}
void CLArithmeticSubtraction::run()
diff --git a/src/runtime/gpu/cl/operators/ClAdd.h b/src/runtime/gpu/cl/operators/ClAdd.h
index 2854c16180..f751d8dc83 100644
--- a/src/runtime/gpu/cl/operators/ClAdd.h
+++ b/src/runtime/gpu/cl/operators/ClAdd.h
@@ -58,11 +58,11 @@ public:
* - (QSYMM16,QSYMM16) -> QSYMM16
*
* @param[in] compile_context The compile context to be used.
- * @param[in, out] src1 First source tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[in, out] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
* The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[in, out] src2 Second source tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[in, out] src2 Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
* The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
- * @param[out] dst Destination tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[out] dst Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
* @param[in] policy Policy to use to handle overflow.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
diff --git a/src/runtime/gpu/cl/operators/ClConcatenate.h b/src/runtime/gpu/cl/operators/ClConcatenate.h
index 112e2ac6b7..0d960a605c 100644
--- a/src/runtime/gpu/cl/operators/ClConcatenate.h
+++ b/src/runtime/gpu/cl/operators/ClConcatenate.h
@@ -54,8 +54,8 @@ public:
*
*
* @param[in] compile_context The compile context to be used.
- * @param[in,out] src_vector The vectors containing all the tensors to concatenate. Data types supported: All
- * @param[out] dst Destination tensor. Data types supported: same as @p src_vector.
+ * @param[in,out] src_vector The vector containing all the tensor infos to concatenate. Data types supported: All
+ * @param[out] dst Destination tensor info. Data types supported: same as @p src_vector.
* @param[in] axis Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3.
*/
void configure(const ClCompileContext &compile_context, const std::vector<ITensorInfo *> &src_vector, ITensorInfo *dst, size_t axis);
diff --git a/src/runtime/gpu/cl/operators/ClSub.cpp b/src/runtime/gpu/cl/operators/ClSub.cpp
new file mode 100644
index 0000000000..429f23a837
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClSub.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClSub.h"
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClElementwiseKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+void ClSub::configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ auto k = std::make_unique<kernels::ClSaturatedArithmeticKernel>();
+ k->configure(compile_context, ArithmeticOperation::SUB, src1, src2, dst, policy, act_info);
+ _kernel = std::move(k);
+}
+
+Status ClSub::validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ return kernels::ClSaturatedArithmeticKernel::validate(ArithmeticOperation::SUB, src1, src2, dst, policy, act_info);
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClSub.h b/src/runtime/gpu/cl/operators/ClSub.h
new file mode 100644
index 0000000000..bcad84d583
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClSub.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_SUB_H
+#define ARM_COMPUTE_CL_SUB_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to run arithmetic subtraction
+ *
+ * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @note The function performs an arithmetic subtraction between two tensors.
+ */
+class ClSub : public IClOperator
+{
+public:
+ /** Default Constructor */
+ ClSub() = default;
+ /** Configure function for a given list of arguments.
+ *
+ * Valid configurations (src1,src2) -> dst :
+ *
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (S16,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,S16) -> S16
+ * - (S32,S32) -> S32
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
+ * - (QASYMM8,QASYMM8) -> QASYMM8
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+ * - (QSYMM16,QSYMM16) -> QSYMM16
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in, out] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[in, out] src2 Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+ * @param[out] dst Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ */
+ void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, ConvertPolicy policy,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref ClSub
+ *
+ * Valid configurations (src1,src2) -> dst :
+ *
+ * - (U8,U8) -> U8
+ * - (U8,U8) -> S16
+ * - (S16,U8) -> S16
+ * - (U8,S16) -> S16
+ * - (S16,S16) -> S16
+ * - (S32,S32) -> S32
+ * - (F16,F16) -> F16
+ * - (F32,F32) -> F32
+ * - (QASYMM8,QASYMM8) -> QASYMM8
+ * - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+ * - (QSYMM16,QSYMM16) -> QSYMM16
+ *
+ * @param[in] src1 First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[in] src2 Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[in] dst Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @param[in] policy Policy to use to handle overflow.
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, ConvertPolicy policy,
+ const ActivationLayerInfo &act_info = ActivationLayerInfo());
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_SUB_H */