author     Georgios Pinitas <georgios.pinitas@arm.com>   2019-06-04 17:31:46 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>   2019-06-05 14:14:48 +0000
commit     4b3fba1850fdf84ba3f9a0c98acf3de672330b34 (patch)
tree       1b65639ec7387c474903583ff0927918c8c7d837 /src/core/CL/cl_kernels
parent     c625acd2a60a4fe34633c5cecef85c230933f772 (diff)
download   ComputeLibrary-4b3fba1850fdf84ba3f9a0c98acf3de672330b34.tar.gz
COMPMID-2372: Add support for QASYMM8 for Tanh
- Perform calculations in the floating point domain
- Extends checks for Logistic as scale should be 1/256 and offset 0

Change-Id: I90ef4a042f053976936f5d28f8e09b54eec196a2
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1287
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/CL/cl_kernels')
-rw-r--r--  src/core/CL/cl_kernels/activation_layer_qa8.cl | 119
1 file changed, 63 insertions(+), 56 deletions(-)
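The diff below moves the QASYMM8 activation math into the float domain: each value is dequantized with the input quantization parameters (S1_VAL/O1_VAL), the activation is evaluated in float via the shared activation_float_helpers.h macros, and the result is requantized with the output parameters (S2_VAL/O2_VAL, falling back to the input ones when the output parameters are not defined). A minimal scalar sketch of that flow in plain C follows; the function name and the tanh example are illustrative only, and the A/B parameters used by some activations are omitted.

#include <math.h>
#include <stdint.h>

/* Illustrative scalar version of the float-domain path: dequantize with the input
 * scale/offset, run the activation in float, requantize with the output scale/offset,
 * then saturate to uint8 as CONVERT_SAT does in the kernel. */
static uint8_t qasymm8_tanh(uint8_t q, float s_in, int o_in, float s_out, int o_out)
{
    float x = ((float)q - (float)o_in) * s_in;  /* dequantize */
    float y = tanhf(x);                         /* activation in the float domain */
    float r = roundf(y / s_out) + (float)o_out; /* requantize */
    if (r < 0.0f)   r = 0.0f;
    if (r > 255.0f) r = 255.0f;
    return (uint8_t)r;
}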
diff --git a/src/core/CL/cl_kernels/activation_layer_qa8.cl b/src/core/CL/cl_kernels/activation_layer_qa8.cl
index cfb61376ca..41f23ca79b 100644
--- a/src/core/CL/cl_kernels/activation_layer_qa8.cl
+++ b/src/core/CL/cl_kernels/activation_layer_qa8.cl
@@ -26,52 +26,25 @@
#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
-// RELU Activation
-inline TYPE relu_op(TYPE x)
-{
- return max((TYPE)CONST_0, x);
-}
-// Bounded RELU Activation
-inline TYPE brelu_op(TYPE x)
-{
- return min((TYPE)A_VAL, max(CONST_0, x));
-}
-// Lower Upper Bounded RELU Activation
-inline TYPE lu_brelu_op(TYPE x)
-{
- return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
-}
+#if defined(FLOAT_DOMAIN)
+// Activations performed in the float domain
-#define ACTIVATION_OP2(op, x) op##_op(x)
-#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
-
-#if defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL)
-#define PERFORM_ACTIVATION_QA8(act, data) \
- ({ \
- data = ACTIVATION_OP(act, data); \
- \
- VEC_DATA_TYPE(float, VEC_SIZE) \
- fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \
- \
- fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
- data = CONVERT_SAT(fdata, VEC_DATA_TYPE(uchar, VEC_SIZE)); \
- })
-#else /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
-#define PERFORM_ACTIVATION_QA8(act, data) \
- ({ \
- data = ACTIVATION_OP(act, data); \
- })
-#endif /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
+#include "activation_float_helpers.h"
-#if defined(ACT)
+#if defined(O2_VAL) && defined(S2_VAL)
+#define OFFSET_OUT O2_VAL
+#define SCALE_OUT S2_VAL
+#else // defined(O2_VAL) && defined(S2_VAL)
+#define OFFSET_OUT O1_VAL
+#define SCALE_OUT S1_VAL
+#endif // defined(O2_VAL) && defined(S2_VAL)
-/** This performs an activation function on QASYMM8 inputs.
+/** This performs an activation function on QASYMM8 inputs with float transformations.
*
* @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
* @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
@@ -94,7 +67,7 @@ inline TYPE lu_brelu_op(TYPE x)
* @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
-__kernel void activation_layer_qa8(
+__kernel void activation_layer_qa8_f32(
TENSOR3D_DECLARATION(input)
#ifndef IN_PLACE
,
@@ -113,29 +86,65 @@ __kernel void activation_layer_qa8(
// Load data
TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
- data = PERFORM_ACTIVATION_QA8(ACT, data);
+ VEC_FLOAT data_flt = CONVERT(data, VEC_FLOAT);
+ data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
+ data_flt = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL);
+
+ data = CONVERT_SAT(round(data_flt / ((float)SCALE_OUT)) + (float)OFFSET_OUT, TYPE);
// Store result
VSTORE(VEC_SIZE)
(data, 0, (__global DATA_TYPE *)output.ptr);
}
-#endif /* defined(ACT) */
+#else // defined(FLOAT_DOMAIN)
+// Activations performed in the quantized domain
-#if defined(O2_VAL) && defined(S2_VAL)
-#define OFFSET_OUT O2_VAL
-#define SCALE_OUT S2_VAL
-#else // defined(O2_VAL) && defined(S2_VAL)
-#define OFFSET_OUT O1_VAL
-#define SCALE_OUT S1_VAL
-#endif // defined(O2_VAL) && defined(S2_VAL)
+// RELU Activation
+inline TYPE relu_op(TYPE x)
+{
+ return max((TYPE)CONST_0, x);
+}
+// Bounded RELU Activation
+inline TYPE brelu_op(TYPE x)
+{
+ return min((TYPE)A_VAL, max(CONST_0, x));
+}
+// Lower Upper Bounded RELU Activation
+inline TYPE lu_brelu_op(TYPE x)
+{
+ return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
+}
+
+#define ACTIVATION_OP2(op, x) op##_op(x)
+#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
+
+#if defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL)
+#define PERFORM_ACTIVATION_QA8(act, data) \
+ ({ \
+ data = ACTIVATION_OP(act, data); \
+ \
+ VEC_DATA_TYPE(float, VEC_SIZE) \
+ fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \
+ \
+ fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
+ data = CONVERT_SAT(fdata, VEC_DATA_TYPE(uchar, VEC_SIZE)); \
+ })
+#else /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
+#define PERFORM_ACTIVATION_QA8(act, data) \
+ ({ \
+ data = ACTIVATION_OP(act, data); \
+ })
+#endif /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
-/** This performs a Logistic activation function on QASYMM8 inputs.
+#if defined(ACT)
+/** This performs an activation function on QASYMM8 inputs.
*
* @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
* @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
* @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
@@ -158,7 +167,7 @@ __kernel void activation_layer_qa8(
* @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
*/
-__kernel void activation_layer_logistic_qa8(
+__kernel void activation_layer_qa8(
TENSOR3D_DECLARATION(input)
#ifndef IN_PLACE
,
@@ -167,7 +176,7 @@ __kernel void activation_layer_logistic_qa8(
)
{
// Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
#ifdef IN_PLACE
Tensor3D output = input;
#else /* IN_PLACE */
@@ -177,13 +186,11 @@ __kernel void activation_layer_logistic_qa8(
// Load data
TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
- VEC_FLOAT data_flt = CONVERT(data, VEC_FLOAT);
- data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
- data_flt = 1.f / (1.f + exp(-data_flt));
-
- data = CONVERT_SAT(round(data_flt / ((float)SCALE_OUT)) + (float)OFFSET_OUT, TYPE);
+ data = PERFORM_ACTIVATION_QA8(ACT, data);
// Store result
VSTORE(VEC_SIZE)
(data, 0, (__global DATA_TYPE *)output.ptr);
}
+#endif // defined(ACT)
+#endif // defined(FLOAT_DOMAIN)
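The commit message also notes that the checks for Logistic are extended so that the output quantization must be scale = 1/256 and offset = 0, which maps the sigmoid's (0, 1) range onto the full uint8 range. That check lives in the host-side validate code rather than in this kernel file; the sketch below is illustrative only and the helper name is made up.

#include <math.h>
#include <stdbool.h>

/* Hedged sketch of the expected output quantization for QASYMM8 Logistic:
 * with y in (0, 1), q = round(y / (1/256)) + 0 spans 0..255 (e.g. y = 0.5 -> q = 128). */
static bool logistic_output_qinfo_is_valid(float scale, int offset)
{
    return fabsf(scale - 1.0f / 256.0f) < 1e-9f && offset == 0;
}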