diff options
author | Manuel Bottini <manuel.bottini@arm.com> | 2019-06-26 16:23:03 +0100 |
---|---|---|
committer | Georgios Pinitas <georgios.pinitas@arm.com> | 2019-07-09 09:24:21 +0000 |
commit | 30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3 (patch) | |
tree | 33e12ced1dca23b79212b6afd64950ed4a40363b /src/core/CL/cl_kernels | |
parent | ebdde65530c8819a16d558fc5ebb3cc519fbc344 (diff) | |
download | ComputeLibrary-30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3.tar.gz |
COMPMID-2411: Add (logistic and tanh) activation support for QSYMM16 for CL
Change-Id: I8d72490b1cc58563ba7b94664135586bc40e6526
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1466
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/core/CL/cl_kernels')
-rw-r--r-- | src/core/CL/cl_kernels/activation_layer_quant.cl (renamed from src/core/CL/cl_kernels/activation_layer_qa8.cl) | 80 |
-rw-r--r-- | src/core/CL/cl_kernels/activation_quant_helpers.h | 84 |
-rw-r--r-- | src/core/CL/cl_kernels/depthwise_convolution_quantized.cl | 4 |
3 files changed, 107 insertions, 61 deletions
diff --git a/src/core/CL/cl_kernels/activation_layer_qa8.cl b/src/core/CL/cl_kernels/activation_layer_quant.cl index 41f23ca79b..ebd3408b23 100644 --- a/src/core/CL/cl_kernels/activation_layer_qa8.cl +++ b/src/core/CL/cl_kernels/activation_layer_quant.cl @@ -21,9 +21,8 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "helpers.h" +#include "activation_quant_helpers.h" -#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) #define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE) #if defined(FLOAT_DOMAIN) @@ -31,15 +30,7 @@ #include "activation_float_helpers.h" -#if defined(O2_VAL) && defined(S2_VAL) -#define OFFSET_OUT O2_VAL -#define SCALE_OUT S2_VAL -#else // defined(O2_VAL) && defined(S2_VAL) -#define OFFSET_OUT O1_VAL -#define SCALE_OUT S1_VAL -#endif // defined(O2_VAL) && defined(S2_VAL) - -/** This performs an activation function on QASYMM8 inputs with float transformations. +/** This performs an activation function on quantized inputs with float transformations. * * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * @@ -47,10 +38,10 @@ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively. * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively. - * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively. + * @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively. * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128. * - * @param[in] input_ptr Pointer to the source image. 
Supported data types: QASYMM8 + * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8/QSYMM16 * @param[in] input_stride_x Stride of the source image in X dimension (in bytes) * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes) @@ -67,7 +58,7 @@ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -__kernel void activation_layer_qa8_f32( +__kernel void activation_layer_quant_f32( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -87,10 +78,18 @@ __kernel void activation_layer_qa8_f32( TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr); VEC_FLOAT data_flt = CONVERT(data, VEC_FLOAT); - data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL); - data_flt = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL); - - data = CONVERT_SAT(round(data_flt / ((float)SCALE_OUT)) + (float)OFFSET_OUT, TYPE); +#if defined(O1_VAL) + data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL); +#else // defined(O1_VAL) + data_flt = round(data_flt) * ((float)S1_VAL); +#endif // defined(O1_VAL) + data_flt = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL); + +#if defined(O2_VAL) + data = CONVERT_SAT(round(data_flt / ((float)S2_VAL)) + (float)O2_VAL, TYPE); +#else // defined(O2_VAL) + data = CONVERT_SAT(round(data_flt / ((float)S2_VAL)), TYPE); +#endif // defined(O2_VAL) // Store result VSTORE(VEC_SIZE) @@ -100,45 +99,8 @@ __kernel void activation_layer_qa8_f32( #else // defined(FLOAT_DOMAIN) // Activations performed in the quantized domain -// RELU Activation -inline TYPE relu_op(TYPE x) -{ - return max((TYPE)CONST_0, x); -} -// Bounded RELU Activation -inline TYPE brelu_op(TYPE x) -{ - return min((TYPE)A_VAL, max(CONST_0, x)); -} -// 
Lower Upper Bounded RELU Activation -inline TYPE lu_brelu_op(TYPE x) -{ - return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL); -} - -#define ACTIVATION_OP2(op, x) op##_op(x) -#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x) - -#if defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) -#define PERFORM_ACTIVATION_QA8(act, data) \ - ({ \ - data = ACTIVATION_OP(act, data); \ - \ - VEC_DATA_TYPE(float, VEC_SIZE) \ - fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \ - \ - fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \ - data = CONVERT_SAT(fdata, VEC_DATA_TYPE(uchar, VEC_SIZE)); \ - }) -#else /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */ -#define PERFORM_ACTIVATION_QA8(act, data) \ - ({ \ - data = ACTIVATION_OP(act, data); \ - }) -#endif /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */ - #if defined(ACT) -/** This performs an activation function on QASYMM8 inputs. +/** This performs an activation function on quantized inputs. * * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time * @@ -150,7 +112,7 @@ inline TYPE lu_brelu_op(TYPE x) * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively. * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128. * - * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8 + * @param[in] input_ptr Pointer to the source image. 
Supported data types: QASYMM8/QSYMM16 * @param[in] input_stride_x Stride of the source image in X dimension (in bytes) * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes) @@ -167,7 +129,7 @@ inline TYPE lu_brelu_op(TYPE x) * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image */ -__kernel void activation_layer_qa8( +__kernel void activation_layer_quant( TENSOR3D_DECLARATION(input) #ifndef IN_PLACE , @@ -186,7 +148,7 @@ __kernel void activation_layer_qa8( // Load data TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr); - data = PERFORM_ACTIVATION_QA8(ACT, data); + data = PERFORM_ACTIVATION_QUANT(ACT, data); // Store result VSTORE(VEC_SIZE) diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h new file mode 100644 index 0000000000..402e7ac41f --- /dev/null +++ b/src/core/CL/cl_kernels/activation_quant_helpers.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "helpers.h" + +#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) + +#if defined(S1_VAL) && !defined(S2_VAL) +#define S2_VAL S1_VAL +#endif // defined(S1_VAL) && !defined(S2_VAL) +#if defined(O1_VAL) && !defined(O2_VAL) +#define O2_VAL O1_VAL +#endif // defined(O1_VAL) && !defined(O2_VAL) + +// RELU Activation +inline TYPE relu_op(TYPE x) +{ + return max((TYPE)CONST_0, x); +} +// Bounded RELU Activation +inline TYPE brelu_op(TYPE x) +{ + return min((TYPE)A_VAL, max(CONST_0, x)); +} +// Lower Upper Bounded RELU Activation +inline TYPE lu_brelu_op(TYPE x) +{ + return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL); +} + +#define ACTIVATION_OP2(op, x) op##_op(x) +#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x) + +#if defined(S1_VAL) && defined(S2_VAL) +#if defined(O1_VAL) && defined(O2_VAL) +#define PERFORM_ACTIVATION_QUANT(act, data) \ + ({ \ + data = ACTIVATION_OP(act, data); \ + \ + VEC_DATA_TYPE(float, VEC_SIZE) \ + fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \ + \ + fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \ + data = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \ + }) +#else // defined(O1_VAL) && defined(O2_VAL) +#define PERFORM_ACTIVATION_QUANT(act, data) \ + ({ \ + data = ACTIVATION_OP(act, data); \ + \ + VEC_DATA_TYPE(float, VEC_SIZE) \ + fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \ + \ + fdata = round((fdata) * ((float)S1_VAL / (float)S2_VAL)); \ + data = CONVERT_SAT(fdata, 
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \ + }) +#endif /* defined(O1_VAL) && defined(O2_VAL) */ +#else /* defined(S1_VAL) && defined(S2_VAL) */ +#define PERFORM_ACTIVATION_QUANT(act, data) \ + ({ \ + data = ACTIVATION_OP(act, data); \ + }) +#endif /* defined(S1_VAL) && defined(S2_VAL) */
\ No newline at end of file diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl index 13568b035d..8f2e441693 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl @@ -31,8 +31,8 @@ #ifndef VEC_SIZE #define VEC_SIZE 8 #endif /* VEC_SIZE */ -#include "activation_layer_qa8.cl" -#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QA8(ACTIVATION_TYPE, x) +#include "activation_layer_quant.cl" +#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x) #else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */ #define ACTIVATION_FUNC(x) (x) #endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */ |