author    | Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com> | 2023-06-30 15:43:29 +0100
committer | Mohmun02 <MohammedSuhail.Munshi@arm.com>               | 2023-07-06 09:49:03 +0000
commit    | c9eeee5c84ad817360a1719c538c6e6c0812ec13 (patch)
tree      | 6c80020617e83b0889e092d685940c7937f41d2c /src
parent    | ce3c48c7af02555f81c0f5e7ef2677916cecef34 (diff)
download  | ComputeLibrary-c9eeee5c84ad817360a1719c538c6e6c0812ec13.tar.gz
Fix nightly failures in MatMulLowpNativeKernel when using bounded activation functions
- Added checks for supported activation functions in ClMatMulLowpNativeKernel::validate()
- Replaced the incorrect float activation macro with the quantized implementation in mat_mul_quantized.cl (see the sketch below)
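
For context, a minimal standalone sketch of why the clamp must happen after quantization: with an asymmetric output scale/offset, float 0.0 maps to the zero point, so applying a bounded ReLU to the quantized bits using float A_VAL/B_VAL clamps against the wrong values. All names and numbers below are illustrative only, not Compute Library code.

```cpp
// Illustrative only: shows a bounded ReLU applied in the quantized domain.
// The scale/offset values are hypothetical examples.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Asymmetric quantization: q = round(v / scale) + offset, clamped to the type range.
int8_t quantize(float v, float scale, int offset)
{
    const int q = static_cast<int>(std::lround(v / scale)) + offset;
    return static_cast<int8_t>(std::min(127, std::max(-128, q)));
}

int main()
{
    const float scale  = 0.05f; // example output quantization scale
    const int   offset = -10;   // example output zero point (what -DZERO_POINT carries)

    // Bounds quantized once on the host, as the patched A_VAL/B_VAL now are.
    const int8_t a_val = quantize(6.0f, scale, offset); // quantized upper bound
    const int8_t zero  = static_cast<int8_t>(offset);   // quantized 0.0f == zero point

    // Clamping an already-quantized accumulator (mirrors brelu_op_quantized):
    const int8_t accq    = quantize(-1.0f, scale, offset);
    const int8_t clamped = std::min(a_val, std::max(zero, accq));
    std::printf("accq=%d -> clamped=%d\n", accq, clamped); // clamped == -10, i.e. 0.0f
    return 0;
}
```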
Resolves: COMPMID-6339
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: I15661f14877f1d3305644e6473feb5482a67e773
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/532858
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Comments-Addressed: bsgcomp <bsgcomp@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9855
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r-- | src/core/CL/cl_kernels/common/mat_mul_quantized.cl | 20
-rw-r--r-- | src/core/CL/cl_kernels/tile_helpers.h              | 26
-rw-r--r-- | src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp    | 18
3 files changed, 41 insertions, 23 deletions
diff --git a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl
index 8cf857dd84..7029af2188 100644
--- a/src/core/CL/cl_kernels/common/mat_mul_quantized.cl
+++ b/src/core/CL/cl_kernels/common/mat_mul_quantized.cl
@@ -34,6 +34,7 @@
  * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
  * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
  * @note The fused activation function used should be passed with -DACTIVATION_TYPE, -DA_VAL and -DB_VAL are used for min and max output with the relu and bounded relu operations.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
  * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
  * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_NT_NT)
  * @note Only the following configurations of M0, N0 and K0 are currently supported:
@@ -196,12 +197,12 @@ __kernel void mat_mul_native_quantized_nt_nt(
     const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
     const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
 
-    T_ACTIVATION(int, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
-
     // Quantize the tile
     TILE(DATA_TYPE, M0, N0, accq);
     T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
 
+    T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
     TILE(int, M0, 1, indirect_buffer);
     LOOP_UNROLLING(int, _i, 0, 1, M0,
     {
@@ -221,6 +222,7 @@ __kernel void mat_mul_native_quantized_nt_nt(
  * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
  * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
  * @note The fused activation function used should be passed with -DACTIVATION_TYPE, -DA_VAL and -DB_VAL are used for min and max output bounded activation functions.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
  * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
  * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_NT_T)
  * @note Only the following configurations of M0, N0 and K0 are currently supported:
@@ -375,12 +377,12 @@ __kernel void mat_mul_native_quantized_nt_t(
     const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
     const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
 
-    T_ACTIVATION(int, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
-
     // Quantize the tile
     TILE(DATA_TYPE, M0, N0, accq);
     T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
 
+    T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
     TILE(int, M0, 1, indirect_buffer);
     LOOP_UNROLLING(int, _i, 0, 1, M0,
     {
@@ -400,6 +402,7 @@ __kernel void mat_mul_native_quantized_nt_t(
  * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
  * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
  * @note The fused activation function used should be passed with -DACTIVATION_TYPE, -DA_VAL and -DB_VAL are used for min and max output with the relu and bounded relu operations.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
  * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
  * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_T_NT)
  * @note Only the following configurations of M0, N0 and K0 are currently supported:
@@ -556,12 +559,12 @@ __kernel void mat_mul_native_quantized_t_nt(
     const bool x_cond = PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0;
     const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
 
-    T_ACTIVATION(int, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
-
     // Quantize the tile
     TILE(DATA_TYPE, M0, N0, accq);
     T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
 
+    T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
     TILE(int, M0, 1, indirect_buffer);
     LOOP_UNROLLING(int, _i, 0, 1, M0,
     {
@@ -581,6 +584,7 @@ __kernel void mat_mul_native_quantized_t_nt(
  * @note The block's dimensions used for the LHS and RHS matrices (M0, N0 and K0) must be passed at compile time using -DN0, -DM0 and -DK0 (e.g. -DN0=8, -DM0=4, -DK0=4).
  * @note The number of leftover outputs rows/columns must be passed using -DPARTIAL_STORE_N0 and -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_N0=2, -DPARTIAL_STORE_M0=3)
  * @note The fused activation function used should be passed with -DACTIVATION_TYPE, -DA_VAL and -DB_VAL are used for min and max output with the relu and bounded relu operations.
+ * @note The value of 0 in quantized format is equivalent to the quantization offset of the output data. This should be passed with -DZERO_POINT
  * @note The dimension K must be passed at compile time using -DK (e.g. -DK=6)
  * @note The kernel name in uppercase must be passed at compile time (e.g. -DMAT_MUL_NATIVE_QUANTIZED_T_T)
  * @note Only the following configurations of M0, N0 and K0 are currently supported:
@@ -742,11 +746,11 @@ __kernel void mat_mul_native_quantized_t_t(
     const bool y_cond = PARTIAL_STORE_M0 != 0 && get_global_id(1) == 0;
 
     // Quantize the tile
-    T_ACTIVATION(int, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, acc, acc);
-
     TILE(DATA_TYPE, M0, N0, accq);
     T_QUANTIZE8_ASYMMETRIC(int, DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, acc, accq);
 
+    T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, accq, accq);
+
     TILE(int, M0, 1, indirect_buffer);
     LOOP_UNROLLING(int, _i, 0, 1, M0,
     {
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 85bd59afd4..8129606277 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -1144,19 +1144,21 @@
     }) \
     })
 
+
+// NOTE : A_VAL and B_VAL should be quantized values (using same quantization info as x)
 // RELU Activation
-#define relu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (max((DATA_TYPE)ZERO_VALUE, x))
+#define relu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (max((DATA_TYPE)ZERO_POINT, x))
 // Bounded RELU Activation
-#define brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)ZERO_VALUE, x)))
+#define brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)ZERO_POINT, x)))
 // Lower Upper Bounded RELU Activation
-#define lu_brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
+#define lu_brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
 // Hard Swish Activation
-#define hard_swish_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x * ((min(max((DATA_TYPE)(x + (DATA_TYPE)3.f), (DATA_TYPE)0.f), (DATA_TYPE)6.f)) * (DATA_TYPE)0.166666667f))
+#define hard_swish_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (x * ((min(max((DATA_TYPE)(x + (DATA_TYPE)3.f), (DATA_TYPE)0.f), (DATA_TYPE)6.f)) * (DATA_TYPE)0.166666667f))
 // Identity Activation
-#define identity_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x)
+#define identity_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) (x)
 
-#define ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) op##_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x)
-#define ACTIVATION_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x)
+#define ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) op##_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x)
+#define ACTIVATION_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x) ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_POINT, A_VAL, B_VAL, x)
 
 #define V_ADD(A_VAL, B_VAL) ((A_VAL) + (B_VAL))
 #define V_SUB(A_VAL, B_VAL) ((A_VAL) - (B_VAL))
@@ -1171,17 +1173,17 @@
  * @param[in] M0              Number of SRC/DST rows
  * @param[in] N0              Number of SRC/DST columns
  * @param[in] ACTIVATION_TYPE Activation type
- * @param[in] ZERO_VALUE      The zero value to consider in the computation
- * @param[in] A_VAL           A value used for the activation (e.g. tanh_op, brelu,..)
- * @param[in] B_VAL           B value used for the activation (e.g. tanh_op, brelu,..)
+ * @param[in] ZERO_POINT      The zero value to consider in the computation
+ * @param[in] A_VAL           Quantized A value used for the activation (e.g. tanh_op, brelu,..)
+ * @param[in] B_VAL           Quantized B value used for the activation (e.g. tanh_op, brelu,..)
  * @param[out] src             SRC tile
  * @param[out] dst             DST tile
  */
-#define T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_VALUE, A_VAL, B_VAL, src, dst) \
+#define T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_POINT, A_VAL, B_VAL, src, dst) \
     ({ \
         LOOP_UNROLLING(int, _m0, 0, 1, M0, \
         { \
-            dst[_m0].v = ACTIVATION_QUANTIZED(ACTIVATION_TYPE, DATA_TYPE, N0, ZERO_VALUE, A_VAL, B_VAL, src[_m0].v); \
+            dst[_m0].v = ACTIVATION_QUANTIZED(ACTIVATION_TYPE, DATA_TYPE, N0, ZERO_POINT, A_VAL, B_VAL, src[_m0].v); \
         }) \
     })
 
diff --git a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp
index 38d78c618b..02c5754672 100644
--- a/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp
+++ b/src/gpu/cl/kernels/ClMatMulLowpNativeKernel.cpp
@@ -35,6 +35,8 @@
 #include "src/core/helpers/WindowHelpers.h"
 #include "src/gpu/cl/ClCompileContext.h"
 
+#include "arm_compute/core/QuantizationInfo.h"
+
 #include "support/Cast.h"
 #include "support/StringSupport.h"
 
@@ -100,13 +102,16 @@ ClMatMulLowpNativeKernel::ClMatMulLowpNativeKernel()
 }
 Status ClMatMulLowpNativeKernel::validate(const ITensorInfo *lhs, const ITensorInfo *rhs, const ITensorInfo *dst, const MatMulKernelInfo &matmul_kernel_info, const ActivationLayerInfo &act_info)
 {
-    ARM_COMPUTE_UNUSED(act_info);
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs, dst);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs);
     ARM_COMPUTE_RETURN_ON_ERROR(validate_matmul_kernel_info(matmul_kernel_info));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_input_shapes(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
 
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.activation() != ActivationFunction::IDENTITY && act_info.activation() != ActivationFunction::RELU
+                                     && act_info.activation() != ActivationFunction::LU_BOUNDED_RELU && act_info.activation() != ActivationFunction::BOUNDED_RELU),
+                                    "Activation Function specified is unsupported.");
+
     if(dst->total_size() != 0)
     {
         const TensorInfo tensor_info_output = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_matmul_shape(lhs->tensor_shape(), rhs->tensor_shape(), matmul_kernel_info));
@@ -169,9 +174,16 @@ void ClMatMulLowpNativeKernel::configure(const ClCompileContext &compile_context
     build_opts.add_option("-DRHS_OFFSET=" + support::cpp11::to_string(rqinfo.offset));
     build_opts.add_option("-DDST_OFFSET=" + support::cpp11::to_string(dqinfo.offset)); // Passed as positive (unlike the above two)
 
-    build_opts.add_option(("-DA_VAL=" + float_to_string_with_full_precision(act_info.a())));
-    build_opts.add_option(("-DB_VAL=" + float_to_string_with_full_precision(act_info.b())));
+    // Floating point boundaries are quantized prior to being passed as arguments.
+    // Note: We expect the input and output tensors to always adopt a per-tensor quantization approach
+    int a_val{};
+    int b_val{};
+    std::tie(b_val, a_val) = get_quantized_activation_min_max(act_info, dst->data_type(), dqinfo);
+
+    build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val));
+    build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val));
     build_opts.add_option("-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(act_info.activation())));
+    build_opts.add_option("-DZERO_POINT=" + support::cpp11::to_string(dqinfo.offset));
 
     std::string kernel_name("mat_mul_native_quantized");
     kernel_name += matmul_kernel_info.adj_lhs ? "_t" : "_nt";
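
The configure() change above replaces the float activation bounds with values quantized by get_quantized_activation_min_max() against the destination quantization info. A simplified, self-contained model of that mapping, assuming per-tensor asymmetric quantization; the enum, struct, and helper names below are illustrative stand-ins, not the library's types:

```cpp
// Simplified model (not the library implementation) of how the bounds passed as
// -DA_VAL (max) and -DB_VAL (min) are derived for a quantized destination tensor.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>

enum class Act { IDENTITY, RELU, BOUNDED_RELU, LU_BOUNDED_RELU };

struct UniformQInfo { float scale; int32_t offset; };

// Asymmetric quantization of a float value: q = round(v / scale) + offset.
static int32_t quantize(float v, UniformQInfo qi)
{
    return static_cast<int32_t>(std::lround(v / qi.scale)) + qi.offset;
}

// Returns {min, max} in the quantized domain, mirroring how the kernel consumes
// B_VAL (lower bound) and A_VAL (upper bound) after this patch.
std::pair<int32_t, int32_t> quantized_activation_min_max(Act act, float a, float b, UniformQInfo oq)
{
    const int32_t type_min = -128, type_max = 127; // QASYMM8_SIGNED range
    int32_t min_q = type_min, max_q = type_max;
    switch(act)
    {
        case Act::RELU:            min_q = oq.offset; break;                           // clamp at quantized 0.0f
        case Act::BOUNDED_RELU:    min_q = oq.offset; max_q = quantize(a, oq); break;  // [0, a]
        case Act::LU_BOUNDED_RELU: min_q = quantize(b, oq); max_q = quantize(a, oq); break; // [b, a]
        default: break; // IDENTITY: full type range
    }
    return {std::max(type_min, min_q), std::min(type_max, max_q)};
}

int main()
{
    const UniformQInfo oq{0.05f, -10}; // hypothetical output scale and zero point
    auto [b_val, a_val] = quantized_activation_min_max(Act::BOUNDED_RELU, 6.0f, 0.0f, oq);
    // b_val == -10 (quantized 0.0f, the zero point), a_val == 110 (quantized 6.0f)
    return (b_val == -10 && a_val == 110) ? 0 : 1;
}
```

Passing integer bounds plus -DZERO_POINT lets the kernel clamp the already-quantized tile directly, which is why the activation now runs after T_QUANTIZE8_ASYMMETRIC rather than before it.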