From 30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Wed, 26 Jun 2019 16:23:03 +0100
Subject: COMPMID-2411: Add (logistic and tanh) activation support for QSYMM16 for CL

Change-Id: I8d72490b1cc58563ba7b94664135586bc40e6526
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/1466
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
---
 .../core/CL/kernels/CLActivationLayerKernel.h      |   6 +-
 .../runtime/CL/functions/CLActivationLayer.h       |   6 +-
 src/core/CL/CLKernelLibrary.cpp                    |   8 +-
 src/core/CL/cl_kernels/activation_layer_qa8.cl     | 196 ---------------------
 src/core/CL/cl_kernels/activation_layer_quant.cl   | 158 +++++++++++++++++
 src/core/CL/cl_kernels/activation_quant_helpers.h  |  84 +++++++++
 .../cl_kernels/depthwise_convolution_quantized.cl  |   4 +-
 src/core/CL/kernels/CLActivationLayerKernel.cpp    |  36 ++--
 tests/validation/CL/ActivationLayer.cpp            |  31 +++-
 tests/validation/fixtures/ActivationLayerFixture.h |   4 +-
 10 files changed, 309 insertions(+), 224 deletions(-)
 delete mode 100644 src/core/CL/cl_kernels/activation_layer_qa8.cl
 create mode 100644 src/core/CL/cl_kernels/activation_layer_quant.cl
 create mode 100644 src/core/CL/cl_kernels/activation_quant_helpers.h
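For context on the data type this patch enables: QSYMM16 is a 16-bit signed, symmetric quantization scheme — a scale with no offset, so the real value 0.0f always maps to the quantized value 0. The sketch below is not part of the patch and the helper names are illustrative, but it shows the arithmetic that the library's quantize_qsymm16()/dequantize_qsymm16() helpers used later in this patch are expected to perform:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative QSYMM16 helpers: symmetric quantization carries a scale only,
// so there is no zero-point offset to add or subtract.
int16_t quantize_qsymm16_sketch(float value, float scale)
{
    const int q = static_cast<int>(std::lround(value / scale));
    return static_cast<int16_t>(std::min(std::max(q, -32768), 32767)); // saturate to the int16 range
}

float dequantize_qsymm16_sketch(int16_t value, float scale)
{
    return static_cast<float>(value) * scale;
}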
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index 12d00de7e8..f20d6c3362 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -51,7 +51,7 @@ public:
      * @note If the output tensor is a nullptr, the activation function will be performed in-place
      *
      * @param[in, out] input    Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
-     *                          of the activation function. Data types supported: QASYMM8/F16/F32.
+     *                          of the activation function. Data types supported: QASYMM8/QSYMM16/F16/F32.
      * @param[out]     output   Destination tensor. Data type supported: same as @p input
      * @param[in]      act_info Activation layer information.
      */
@@ -59,7 +59,7 @@ public:
     /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
      *
      * @param[in] input    Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
-     *                     of the activation function. Data types supported: QASYMM8/F16/F32.
+     *                     of the activation function. Data types supported: QASYMM8/QSYMM16/F16/F32.
      * @param[in] output   Destination tensor info. Data type supported: same as @p input
      * @param[in] act_info Activation layer information.
      *
diff --git a/arm_compute/runtime/CL/functions/CLActivationLayer.h b/arm_compute/runtime/CL/functions/CLActivationLayer.h
index e98fa4bf48..c10c5301c2 100644
--- a/arm_compute/runtime/CL/functions/CLActivationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLActivationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -44,7 +44,7 @@ public:
     * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place
     *
     * @param[in, out] input    Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
-    *                          of the activation function. Data types supported: QASYMM8/F16/F32.
+    *                          of the activation function. Data types supported: QASYMM8/QSYMM16/F16/F32.
     * @param[out]     output   Destination tensor. Data type supported: same as @p input
     * @param[in]      act_info Activation layer parameters.
     */
@@ -52,7 +52,7 @@ public:
    /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayer
    *
    * @param[in] input    Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
-    *                     of the activation function. Data types supported: QASYMM8/F16/F32.
+    *                     of the activation function. Data types supported: QASYMM8/QSYMM16/F16/F32.
    * @param[in] output   Destination tensor info. Data type supported: same as @p input
    * @param[in] act_info Activation layer information.
    *
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index db57bb93a6..36d8bed5b9 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -148,8 +148,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "accumulate_squared", "accumulate.cl" },
     { "accumulate_weighted", "accumulate.cl" },
     { "activation_layer", "activation_layer.cl" },
-    { "activation_layer_qa8", "activation_layer_qa8.cl" },
-    { "activation_layer_qa8_f32", "activation_layer_qa8.cl" },
+    { "activation_layer_quant", "activation_layer_quant.cl" },
+    { "activation_layer_quant_f32", "activation_layer_quant.cl" },
     { "batch_to_space_nchw", "batch_to_space.cl" },
     { "batch_to_space_static_nchw", "batch_to_space.cl" },
     { "batch_to_space_nhwc", "batch_to_space.cl" },
@@ -576,8 +576,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
 #include "./cl_kernels/activation_layer.clembed"
     },
     {
-        "activation_layer_qa8.cl",
-#include "./cl_kernels/activation_layer_qa8.clembed"
+        "activation_layer_quant.cl",
+#include "./cl_kernels/activation_layer_quant.clembed"
     },
     {
         "batch_to_space.cl",
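With the kernel registration above in place, the QSYMM16 path is reachable through the public CLActivationLayer API. A hedged usage sketch, not taken from this patch — the shape and input scale are arbitrary, and the output quantization follows the fixed logistic mapping that the validation changes further down enforce:

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // QSYMM16 input with an arbitrary scale; logistic outputs lie in [0, 1],
    // so the output tensor must use the fixed mapping (scale 1/32768, offset 0).
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 1024.f, 0)));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)));

    CLActivationLayer act;
    act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));

    src.allocator()->allocate();
    dst.allocator()->allocate();
    act.run();
    CLScheduler::get().sync();
    return 0;
}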
diff --git a/src/core/CL/cl_kernels/activation_layer_qa8.cl b/src/core/CL/cl_kernels/activation_layer_qa8.cl
deleted file mode 100644
index 41f23ca79b..0000000000
--- a/src/core/CL/cl_kernels/activation_layer_qa8.cl
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (c) 2016-2019 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
-
-#if defined(FLOAT_DOMAIN)
-// Activations performed in the float domain
-
-#include "activation_float_helpers.h"
-
-#if defined(O2_VAL) && defined(S2_VAL)
-#define OFFSET_OUT O2_VAL
-#define SCALE_OUT S2_VAL
-#else // defined(O2_VAL) && defined(S2_VAL)
-#define OFFSET_OUT O1_VAL
-#define SCALE_OUT S1_VAL
-#endif // defined(O2_VAL) && defined(S2_VAL)
-
-/** This performs an activation function on QASYMM8 inputs with float transformations.
- *
- * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
- * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
- * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
- * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128.
- *
- * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8
- * @param[in]  input_stride_x                       Stride of the source image in X dimension (in bytes)
- * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  input_stride_y                       Stride of the source image in Y dimension (in bytes)
- * @param[in]  input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
- * @param[in]  input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  input_offset_first_element_in_bytes  The offset of the first element in the source image
- * @param[out] output_ptr                           (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in]  output_stride_x                      (Optional) Stride of the destination image in X dimension (in bytes)
- * @param[in]  output_step_x                        (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  output_stride_y                      (Optional) Stride of the destination image in Y dimension (in bytes)
- * @param[in]  output_step_y                        (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                      (Optional) Stride of the source tensor in Z dimension (in bytes)
- * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
- */
-__kernel void activation_layer_qa8_f32(
-    TENSOR3D_DECLARATION(input)
-#ifndef IN_PLACE
-    ,
-    TENSOR3D_DECLARATION(output)
-#endif /* not IN_PLACE */
-)
-{
-    // Get pixels pointer
-    Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
-#ifdef IN_PLACE
-    Tensor3D output = input;
-#else /* IN_PLACE */
-    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-#endif /* IN_PLACE */
-
-    // Load data
-    TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
-
-    VEC_FLOAT data_flt = CONVERT(data, VEC_FLOAT);
-    data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
-    data_flt = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL);
-
-    data = CONVERT_SAT(round(data_flt / ((float)SCALE_OUT)) + (float)OFFSET_OUT, TYPE);
-
-    // Store result
-    VSTORE(VEC_SIZE)
-    (data, 0, (__global DATA_TYPE *)output.ptr);
-}
-
-#else // defined(FLOAT_DOMAIN)
-// Activations performed in the quantized domain
-
-// RELU Activation
-inline TYPE relu_op(TYPE x)
-{
-    return max((TYPE)CONST_0, x);
-}
-// Bounded RELU Activation
-inline TYPE brelu_op(TYPE x)
-{
-    return min((TYPE)A_VAL, max(CONST_0, x));
-}
-// Lower Upper Bounded RELU Activation
-inline TYPE lu_brelu_op(TYPE x)
-{
-    return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
-}
-
-#define ACTIVATION_OP2(op, x) op##_op(x)
-#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
-
-#if defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL)
-#define PERFORM_ACTIVATION_QA8(act, data)                                                         \
-    ({                                                                                            \
-        data = ACTIVATION_OP(act, data);                                                          \
-                                                                                                  \
-        VEC_DATA_TYPE(float, VEC_SIZE)                                                            \
-        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));                                    \
-                                                                                                  \
-        fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
-        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(uchar, VEC_SIZE));                               \
-    })
-#else /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
-#define PERFORM_ACTIVATION_QA8(act, data) \
-    ({                                    \
-        data = ACTIVATION_OP(act, data);  \
-    })
-#endif /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
-
-#if defined(ACT)
-/** This performs an activation function on QASYMM8 inputs.
- *
- * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
- *
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
- * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
- * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
- * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
- * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
- * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128.
- *
- * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8
- * @param[in]  input_stride_x                       Stride of the source image in X dimension (in bytes)
- * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  input_stride_y                       Stride of the source image in Y dimension (in bytes)
- * @param[in]  input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
- * @param[in]  input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  input_offset_first_element_in_bytes  The offset of the first element in the source image
- * @param[out] output_ptr                           (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in]  output_stride_x                      (Optional) Stride of the destination image in X dimension (in bytes)
- * @param[in]  output_step_x                        (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  output_stride_y                      (Optional) Stride of the destination image in Y dimension (in bytes)
- * @param[in]  output_step_y                        (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  output_stride_z                      (Optional) Stride of the source tensor in Z dimension (in bytes)
- * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
- */
-__kernel void activation_layer_qa8(
-    TENSOR3D_DECLARATION(input)
-#ifndef IN_PLACE
-    ,
-    TENSOR3D_DECLARATION(output)
-#endif /* not IN_PLACE */
-)
-{
-    // Get pixels pointer
-    Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
-#ifdef IN_PLACE
-    Tensor3D output = input;
-#else /* IN_PLACE */
-    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-#endif /* IN_PLACE */
-
-    // Load data
-    TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
-
-    data = PERFORM_ACTIVATION_QA8(ACT, data);
-
-    // Store result
-    VSTORE(VEC_SIZE)
-    (data, 0, (__global DATA_TYPE *)output.ptr);
-}
-#endif // defined(ACT)
-#endif // defined(FLOAT_DOMAIN)
diff --git a/src/core/CL/cl_kernels/activation_layer_quant.cl b/src/core/CL/cl_kernels/activation_layer_quant.cl
new file mode 100644
index 0000000000..ebd3408b23
--- /dev/null
+++ b/src/core/CL/cl_kernels/activation_layer_quant.cl
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2016-2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "activation_quant_helpers.h"
+
+#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
+
+#if defined(FLOAT_DOMAIN)
+// Activations performed in the float domain
+
+#include "activation_float_helpers.h"
+
+/** This performs an activation function on quantized inputs with float transformations.
+ *
+ * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
+ * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
+ * @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively.
+ * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128.
+ *
+ * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8/QSYMM16
+ * @param[in]  input_stride_x                       Stride of the source image in X dimension (in bytes)
+ * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input_stride_y                       Stride of the source image in Y dimension (in bytes)
+ * @param[in]  input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input_offset_first_element_in_bytes  The offset of the first element in the source image
+ * @param[out] output_ptr                           (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in]  output_stride_x                      (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in]  output_step_x                        (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  output_stride_y                      (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in]  output_step_y                        (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                      (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+__kernel void activation_layer_quant_f32(
+    TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+    ,
+    TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+    // Get pixels pointer
+    Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+    Tensor3D output = input;
+#else /* IN_PLACE */
+    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+    // Load data
+    TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
+
+    VEC_FLOAT data_flt = CONVERT(data, VEC_FLOAT);
+#if defined(O1_VAL)
+    data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
+#else  // defined(O1_VAL)
+    data_flt = round(data_flt) * ((float)S1_VAL);
+#endif // defined(O1_VAL)
+    data_flt = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL);
+
+#if defined(O2_VAL)
+    data = CONVERT_SAT(round(data_flt / ((float)S2_VAL)) + (float)O2_VAL, TYPE);
+#else  // defined(O2_VAL)
+    data = CONVERT_SAT(round(data_flt / ((float)S2_VAL)), TYPE);
+#endif // defined(O2_VAL)
+
+    // Store result
+    VSTORE(VEC_SIZE)
+    (data, 0, (__global DATA_TYPE *)output.ptr);
+}
+
+#else // defined(FLOAT_DOMAIN)
+// Activations performed in the quantized domain
+
+#if defined(ACT)
+/** This performs an activation function on quantized inputs.
+ *
+ * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
+ * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
+ * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
+ * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128.
+ *
+ * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8/QSYMM16
+ * @param[in]  input_stride_x                       Stride of the source image in X dimension (in bytes)
+ * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input_stride_y                       Stride of the source image in Y dimension (in bytes)
+ * @param[in]  input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input_offset_first_element_in_bytes  The offset of the first element in the source image
+ * @param[out] output_ptr                           (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr
+ * @param[in]  output_stride_x                      (Optional) Stride of the destination image in X dimension (in bytes)
+ * @param[in]  output_step_x                        (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  output_stride_y                      (Optional) Stride of the destination image in Y dimension (in bytes)
+ * @param[in]  output_step_y                        (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                      (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
+ */
+__kernel void activation_layer_quant(
+    TENSOR3D_DECLARATION(input)
+#ifndef IN_PLACE
+    ,
+    TENSOR3D_DECLARATION(output)
+#endif /* not IN_PLACE */
+)
+{
+    // Get pixels pointer
+    Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+    Tensor3D output = input;
+#else /* IN_PLACE */
+    Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+
+    // Load data
+    TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
+
+    data = PERFORM_ACTIVATION_QUANT(ACT, data);
+
+    // Store result
+    VSTORE(VEC_SIZE)
+    (data, 0, (__global DATA_TYPE *)output.ptr);
+}
+#endif // defined(ACT)
+#endif // defined(FLOAT_DOMAIN)
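Logistic and tanh take the float-domain kernel above (activation_layer_quant_f32): dequantize with the input scale, apply the activation in float, then requantize with the output scale. A scalar C++ sketch of the same sequence for one QSYMM16 element — illustrative only, with a hypothetical function name; the kernel does this element-wise on vectors:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Mirrors activation_layer_quant_f32 for one QSYMM16 element with TANH, which
// in this library computes a * tanh(b * x). Offsets are omitted because
// QSYMM16 is symmetric (no O1_VAL/O2_VAL are defined).
int16_t tanh_qsymm16_sketch(int16_t q_in, float s1, float s2, float a, float b)
{
    const float x = static_cast<float>(q_in) * s1;          // dequantize
    const float y = a * std::tanh(b * x);                   // activate in float
    const int   q = static_cast<int>(std::lround(y / s2));  // requantize
    return static_cast<int16_t>(std::min(std::max(q, -32768), 32767)); // saturate, as CONVERT_SAT does
}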
diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h
new file mode 100644
index 0000000000..402e7ac41f
--- /dev/null
+++ b/src/core/CL/cl_kernels/activation_quant_helpers.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+
+#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+
+#if defined(S1_VAL) && !defined(S2_VAL)
+#define S2_VAL S1_VAL
+#endif // defined(S1_VAL) && !defined(S2_VAL)
+#if defined(O1_VAL) && !defined(O2_VAL)
+#define O2_VAL O1_VAL
+#endif // defined(O1_VAL) && !defined(O2_VAL)
+
+// RELU Activation
+inline TYPE relu_op(TYPE x)
+{
+    return max((TYPE)CONST_0, x);
+}
+// Bounded RELU Activation
+inline TYPE brelu_op(TYPE x)
+{
+    return min((TYPE)A_VAL, max(CONST_0, x));
+}
+// Lower Upper Bounded RELU Activation
+inline TYPE lu_brelu_op(TYPE x)
+{
+    return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
+}
+
+#define ACTIVATION_OP2(op, x) op##_op(x)
+#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
+
+#if defined(S1_VAL) && defined(S2_VAL)
+#if defined(O1_VAL) && defined(O2_VAL)
+#define PERFORM_ACTIVATION_QUANT(act, data)                                                       \
+    ({                                                                                            \
+        data = ACTIVATION_OP(act, data);                                                          \
+                                                                                                  \
+        VEC_DATA_TYPE(float, VEC_SIZE)                                                            \
+        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));                                    \
+                                                                                                  \
+        fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
+        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));                           \
+    })
+#else // defined(O1_VAL) && defined(O2_VAL)
+#define PERFORM_ACTIVATION_QUANT(act, data)                             \
+    ({                                                                  \
+        data = ACTIVATION_OP(act, data);                                \
+                                                                        \
+        VEC_DATA_TYPE(float, VEC_SIZE)                                  \
+        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));          \
+                                                                        \
+        fdata = round((fdata) * ((float)S1_VAL / (float)S2_VAL));       \
+        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \
+    })
+#endif /* defined(O1_VAL) && defined(O2_VAL) */
+#else /* defined(S1_VAL) && defined(S2_VAL) */
+#define PERFORM_ACTIVATION_QUANT(act, data) \
+    ({                                      \
+        data = ACTIVATION_OP(act, data);    \
+    })
+#endif /* defined(S1_VAL) && defined(S2_VAL) */
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
index 13568b035d..8f2e441693 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -31,8 +31,8 @@
 #ifndef VEC_SIZE
 #define VEC_SIZE 8
 #endif /* VEC_SIZE */
-#include "activation_layer_qa8.cl"
-#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QA8(ACTIVATION_TYPE, x)
+#include "activation_layer_quant.cl"
+#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x)
 #else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
 #define ACTIVATION_FUNC(x) (x)
 #endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
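For relu and the bounded relu variants, PERFORM_ACTIVATION_QUANT above stays in the quantized domain and, when input and output quantization differ, rescales by the ratio S1/S2; the symmetric branch has no O1/O2 offsets. A scalar sketch of that branch with illustrative values (the macro itself operates on vectors):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Quantized-domain ReLU on a QSYMM16 element followed by the S1/S2 rescale of
// the symmetric branch of PERFORM_ACTIVATION_QUANT (no offsets defined).
int16_t relu_rescale_qsymm16_sketch(int16_t q_in, float s1, float s2)
{
    const int16_t acted = std::max<int16_t>(q_in, 0); // relu_op, with CONST_0 == 0 for symmetric types
    const float   f     = std::round(static_cast<float>(acted) * (s1 / s2));
    const int     q     = static_cast<int>(f);
    return static_cast<int16_t>(std::min(std::max(q, -32768), 32767)); // CONVERT_SAT equivalent
}
// e.g. q_in = -100 -> 0; q_in = 1000 with s1 == 2 * s2 -> 2000 (saturating at 32767).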
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 34d1298d61..97a0ff6c6c 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -46,9 +46,9 @@ namespace
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);
 
-    static std::set<ActivationLayerInfo::ActivationFunction> qs8_supported_activations =
+    static std::set<ActivationLayerInfo::ActivationFunction> quantized_supported_activations =
     {
         ActivationLayerInfo::ActivationFunction::RELU,
         ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
@@ -60,11 +60,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     const QuantizationInfo                       &oq_info = (output != nullptr) ? output->quantization_info() : input->quantization_info();
     const ActivationLayerInfo::ActivationFunction f_act   = act_info.activation();
 
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized_asymmetric(data_type) && (qs8_supported_activations.count(f_act) == 0),
-                                    "For QASYMM8 only tanh, logistic, relu and lower/upper bounded relu are supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(data_type) && (quantized_supported_activations.count(f_act) == 0),
+                                    "For quantized data types only tanh, logistic, relu and lower/upper bounded relu are supported");
+
     ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 128.f, 128)));
     ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 256.f, 0)));
 
+    ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
+    ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
+
     // Checks performed when output is configured
     if((output != nullptr) && (output->total_size() != 0))
     {
@@ -135,16 +139,22 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
     int b_const_int = 0;
 
     const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
-    const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(dt);
+    const bool is_quantized             = is_data_type_quantized(dt);
     const bool perform_activation_in_float = (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) || (f_act == ActivationLayerInfo::ActivationFunction::TANH);
 
     // Create quantized version of constants a, b if needed
-    if(is_quantized_asymmetric)
+    if(dt == DataType::QASYMM8)
     {
         const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
         a_const_int = quantize_qasymm8(a_const, iq_info);
         b_const_int = quantize_qasymm8(b_const, iq_info);
     }
+    else if(dt == DataType::QSYMM16)
+    {
+        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+        a_const_int = quantize_qsymm16(a_const, iq_info);
+        b_const_int = quantize_qsymm16(b_const, iq_info);
+    }
 
     // Set build options
     CLBuildOptions build_opts;
@@ -155,7 +165,7 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
     build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
 
     // Set A, B constants in build options
-    if(is_quantized_asymmetric && !perform_activation_in_float)
+    if(is_quantized && !perform_activation_in_float)
     {
         build_opts.add_option(("-DA_VAL=" + support::cpp11::to_string(a_const_int)));
         build_opts.add_option(("-DB_VAL=" + support::cpp11::to_string(b_const_int)));
@@ -167,14 +177,14 @@
     }
 
     // Set quantization info build options
-    if(is_quantized_asymmetric)
+    if(is_quantized)
    {
         const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
 
         // Quantized value of 0 corresponds to the offset o1
-        build_opts.add_option(("-DCONST_0=" + support::cpp11::to_string(iq_info.offset)));
+        build_opts.add_option(("-DCONST_0=" + (is_data_type_quantized_asymmetric(dt) ? support::cpp11::to_string(iq_info.offset) : "0")));
         build_opts.add_option(("-DS1_VAL=" + float_to_string_with_full_precision(iq_info.scale)));
-        build_opts.add_option(("-DO1_VAL=" + support::cpp11::to_string(iq_info.offset)));
+        build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO1_VAL=" + support::cpp11::to_string(iq_info.offset));
 
         // Set scale and offset of the input and output if they have different quantization info
         if(output != nullptr)
         {
@@ -184,16 +194,16 @@
             if(iq_info != oq_info)
             {
                 build_opts.add_option(("-DS2_VAL=" + float_to_string_with_full_precision(oq_info.scale)));
-                build_opts.add_option(("-DO2_VAL=" + support::cpp11::to_string(oq_info.offset)));
+                build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO2_VAL=" + support::cpp11::to_string(oq_info.offset));
             }
         }
     }
 
     // Create kernel
     std::string kernel_name = std::string("activation_layer");
-    if(is_quantized_asymmetric)
+    if(is_quantized)
     {
-        kernel_name += perform_activation_in_float ? std::string("_qa8_f32") : std::string("_qa8");
+        kernel_name += perform_activation_in_float ? std::string("_quant_f32") : std::string("_quant");
     }
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
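To make the configure() logic above concrete: for a QSYMM16 input with a LOGISTIC activation, the branches taken are is_quantized == true and perform_activation_in_float == true. The comment block below illustrates, with hypothetical values, roughly which preprocessor definitions would be emitted — the exact set and spellings come from the code above plus options outside these hunks:

// Illustrative only (e.g. an input scale of 1/1024 and the mandated output scale 1/32768):
//   -DDATA_TYPE=short -DVEC_SIZE=16     // OpenCL element type and vector width for QSYMM16
//   -DACT=logistic -DA_VAL=... -DB_VAL=...  // float a/b constants, since logistic runs in the float domain
//   -DCONST_0=0                         // symmetric type: quantized zero is 0, and no -DO1_VAL/-DO2_VAL
//   -DS1_VAL=0.0009765625 -DS2_VAL=0.000030517578125
// The kernel selected is then "activation_layer_quant_f32" (the FLOAT_DOMAIN path).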
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 45c2e0e683..fd203ccb7e 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -43,6 +43,8 @@ namespace validation
 {
 namespace
 {
+constexpr AbsoluteTolerance<float> tolerance_qsymm16(1.f);
+
 /** Define tolerance of the activation layer.
  *
  * @param[in] activation The activation function used.
@@ -139,6 +141,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                             TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid quantization info
                                                             TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching shapes
+                                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
+                                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
+                                                            TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), // Invalid activation function for QSYMM16
                                                           }),
        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
@@ -146,6 +151,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
                                                TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+                                               TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
+                                               TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
+                                               TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                              })),
        framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -153,8 +161,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
+                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
+                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
                                                   })),
-       framework::dataset::make("Expected", { false, false, true, true, false, false })),
+       framework::dataset::make("Expected", { false, false, true, true, false, false, true, true, false })),
        input_info, output_info, act_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
@@ -228,6 +239,24 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<uint8_t>, fra
     validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
 }
 TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+                                                                                                                        framework::dataset::make("DataType",
+                                                                                                                                                 DataType::QSYMM16)),
+                                                                                                                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
+                                                                                                                      framework::dataset::make("DataType",
+                                                                                                                                               DataType::QSYMM16)),
+                                                                                                                      framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
 TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // ActivationLayer
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 4aaf8e7ce3..d9f26b7368 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -73,7 +73,7 @@ protected:
             std::uniform_real_distribution<> distribution(min_bound, max_bound);
             library->fill(tensor, distribution, 0);
         }
-        else if(is_data_type_quantized_asymmetric(tensor.data_type()) || (is_data_type_quantized_symmetric(tensor.data_type())))
+        else if(is_data_type_quantized(tensor.data_type()))
         {
             library->fill_tensor_uniform(tensor, 0);
         }
@@ -96,7 +96,7 @@ protected:
         // Create and configure function
         FunctionType act_layer;
 
-        TensorType *dst_ptr = _in_place ? &src : &dst;
+        TensorType *dst_ptr = _in_place ? nullptr : &dst;
 
         act_layer.configure(&src, dst_ptr, info);
--
cgit v1.2.1
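A final note on the fixture change above: passing nullptr as the destination exercises the in-place path documented in CLActivationLayer.h earlier in this patch. A minimal sketch of that usage from application code, assuming the tensor is already configured and allocated:

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

using namespace arm_compute;

// In-place activation: per the header docs, a nullptr output makes the input
// tensor also hold the result.
void relu_in_place_sketch(CLTensor &tensor)
{
    CLActivationLayer act;
    act.configure(&tensor, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    act.run();
    CLScheduler::get().sync();
}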