Diffstat (limited to 'src')
-rw-r--r-- src/core/CL/cl_kernels/convolution_layer.cl                           |   6
-rw-r--r-- src/core/CL/cl_kernels/depth_convert.cl                               |  12
-rw-r--r-- src/core/CL/cl_kernels/gemmlowp.cl                                    | 202
-rw-r--r-- src/core/CL/kernels/CLDepthConvertLayerKernel.cpp                     |  10
-rw-r--r-- src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp          |   3
-rw-r--r-- src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp |   1
-rw-r--r-- src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp |  79
-rw-r--r-- src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp                  |   4
-rw-r--r-- src/core/CL/kernels/CLWeightsReshapeKernel.cpp                        |   8
-rw-r--r-- src/core/utils/quantization/AsymmHelpers.cpp                          |  16
-rw-r--r-- src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp              |  16
-rw-r--r-- src/runtime/CL/functions/CLFullyConnectedLayer.cpp                    |   2
-rw-r--r-- src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp                   | 112
-rw-r--r-- src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp             | 106
14 files changed, 376 insertions(+), 201 deletions(-)
diff --git a/src/core/CL/cl_kernels/convolution_layer.cl b/src/core/CL/cl_kernels/convolution_layer.cl
index 2b75b45fe1..874b78ebdd 100644
--- a/src/core/CL/cl_kernels/convolution_layer.cl
+++ b/src/core/CL/cl_kernels/convolution_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,7 @@
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note The number of groups should be given as a preprocessor argument using -DNUM_GROUPS=number. e.g. -DNUM_GROUPS=2
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: All
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -43,7 +43,7 @@
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] bias_ptr Pointer to the bias tensor. Same as @p src_ptr
+ * @param[in] bias_ptr Pointer to the bias tensor. Supported data types: F16/F32; for quantized types this must be nullptr
* @param[in] bias_stride_x Stride of the bias tensor in X dimension (in bytes)
* @param[in] bias_step_x bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes The offset of the first element in the source tensor
diff --git a/src/core/CL/cl_kernels/depth_convert.cl b/src/core/CL/cl_kernels/depth_convert.cl
index 75192e6a98..b48300fff2 100644
--- a/src/core/CL/cl_kernels/depth_convert.cl
+++ b/src/core/CL/cl_kernels/depth_convert.cl
@@ -38,11 +38,13 @@
/** This function performs a down-scaling depth conversion.
*
+ * @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is the user's responsibility to keep track of the quantization info.
+ *
* @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
* e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
*
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8/U16/S16/U32/S32/F16/F32
+ * @param[in] in_ptr Pointer to the source image. Supported data types: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32
* @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
* @param[in] in_step_x in_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
@@ -50,7 +52,7 @@
* @param[in] in_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] in_step_z in_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] in_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] out_ptr Pointer to the destination image. Supported data types: U8/U16/S16/U32/S32/F16/F32
+ * @param[out] out_ptr Pointer to the destination image. Supported data types: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
* @param[in] out_stride_x Stride of the destination image in X dimension (in bytes)
* @param[in] out_step_x out_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] out_stride_y Stride of the destination image in Y dimension (in bytes)
@@ -73,6 +75,10 @@ __kernel void convert_depth_down(
VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE)
in_data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)in.ptr);
+#if defined(IS_DATA_TYPE_QUANTIZED)
+ in_data ^= 0x80;
+#endif // defined(IS_DATA_TYPE_QUANTIZED)
+
#if defined(IS_DATA_TYPE_FLOAT)
VSTORE(VEC_SIZE)
(CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)), 0, (__global DATA_TYPE_OUT *)out.ptr);
@@ -88,7 +94,7 @@ __kernel void convert_depth_down(
* e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
*
- * @param[in] in_ptr Pointer to the source image. Supported data types: U8/U16/S16/U32/S32/F16/F32
+ * @param[in] in_ptr Pointer to the source image. Supported data types: U8/S8/U16/S16/U32/S32/F16/F32
* @param[in] in_stride_x Stride of the source image in X dimension (in bytes)
* @param[in] in_step_x in_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] in_stride_y Stride of the source image in Y dimension (in bytes)
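
A minimal standalone C++ sketch (not part of the patch) of why the "in_data ^= 0x80" line added above performs the QSYMM8_PER_CHANNEL -> QASYMM8 down-conversion: flipping the sign bit of an 8-bit two's-complement value is the same as adding 128, so the signed range [-128, 127] maps monotonically onto the unsigned range [0, 255]. That implied zero-point shift of 128 is exactly the quantization-info bookkeeping the new @attention note leaves to the user.

#include <cassert>
#include <cstdint>

int main()
{
    for(int v = -128; v <= 127; ++v)
    {
        // XOR with 0x80 flips the sign bit of the 8-bit pattern...
        const uint8_t via_xor = static_cast<uint8_t>(v) ^ 0x80u;
        // ...which is the same as shifting the value by the new zero point of 128
        const uint8_t via_add = static_cast<uint8_t>(v + 128);
        assert(via_xor == via_add);
    }
    return 0;
}
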
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 214c7a4825..7a97fa6fa1 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -1160,9 +1160,9 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
#if defined(K_OFFSET)
-/* Helper function used to calculate the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel.
+/* Helper function used to calculate the offset contribution after matrix multiplication.
*
- * This kernel takes a final int32 accumulator value (the output of @CLGEMMLowpMatrixMultiplyKernel),
+ * This kernel takes a final int32 accumulator value (the output of matrix multiplication),
* and calculates the offset contribution of matrix A and matrix B.
*
* @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
@@ -1254,9 +1254,9 @@ inline int4 offset_contribution(
return (int4)K_OFFSET + a_offset_s32 + b_offset_s32;
}
-/* OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel. The computation is performed in-place
+/* OpenCL kernel used to add the offset contribution after matrix multiplication. The computation is performed in-place
*
- * This kernel takes a final int32 accumulator value (the output of @CLGEMMLowpMatrixMultiplyKernel),
+ * This kernel takes a final int32 accumulator value (the output of matrix multiplication),
* and adds to it the offset contribution of matrix A and matrix B in-place.
*
* @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
@@ -1389,38 +1389,46 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
* @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
* These values can be used to implement "rectified linear unit" activation functions
*
- * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
- * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
- * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
- * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
- * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
- * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
+ * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
+ * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
+ * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
+ * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
+ * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
+ * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: QASYMM8
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
+ * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
+ * @param[in] result_multipliers_step_x (Optional) result_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
+ * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
+ * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
+ * @param[in] result_shifts_step_x (Optional) result_shifts_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
*/
__kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm_result)
#if defined(A_OFFSET)
@@ -1435,7 +1443,13 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
#if defined(ADD_BIAS)
VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst)
+#if defined(PER_CHANNEL_QUANTIZATION)
+ ,
+ VECTOR_DECLARATION(result_multipliers),
+ VECTOR_DECLARATION(result_shifts)
+#endif // defined(PER_CHANNEL_QUANTIZATION)
+ )
{
const int x = get_global_id(0) * 4;
const int y = get_global_id(1);
@@ -1486,9 +1500,19 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
in_s32 += (int4)RESULT_OFFSET;
// Multiply by result_mult_int and shift
+#if defined(PER_CHANNEL_QUANTIZATION)
+ __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
+ __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
+ int4 result_multipliers_values = vload4(0, (__global int *)result_multipliers_addr);
+ int4 result_shifts_values = vload4(0, (__global int *)result_shifts_addr);
+
+ in_s32 *= result_multipliers_values;
+ in_s32 >>= result_shifts_values;
+#else // !defined(PER_CHANNEL_QUANTIZATION)
in_s32 *= RESULT_MULTIPLIER;
in_s32 >>= RESULT_SHIFT;
+#endif // defined(PER_CHANNEL_QUANTIZATION)
uchar4 res = convert_uchar4_sat(in_s32);
@@ -1503,9 +1527,9 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
vstore4(res, 0, dst_addr);
}
-/* OpenCL kernel used to add the offset contribution after @ref CLGEMMLowpMatrixMultiplyKernel and it quantizes down to uint8.
+/* OpenCL kernel used to add the offset contribution after matrix multiplication and quantize it down to uint8.
*
- * This kernel takes a final int32 accumulator value (the output of @CLGEMMLowpMatrixMultiplyKernel), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8 through the output stage.
+ * This kernel takes a final int32 accumulator value (the output of matrix multiplication), adds to it the offset contribution of matrix A and matrix B and quantizes to uint8 through the output stage.
*
*
* @attention The k_offset = a_offset * b_offset * k (where k is the number of matrix A columns) needs to be passed at compile time using -DK_OFFSET (i.e. -DK_OFFSET=1200)
@@ -1535,38 +1559,46 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
* @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
* These values can be used to implement "rectified linear unit" activation functions
*
- * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
- * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
- * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
- * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
- * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
- * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
- * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
- * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] dst_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
+ * @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] mm_result_step_x mm_result_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mm_result_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] mm_result_step_y mm_result_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] mm_result_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] mm_result_step_z mm_result_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] mm_result_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
+ * @param[in] sum_col_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_col_step_x (Optional) sum_col_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_col_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_col_step_y (Optional) sum_col_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_col_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
+ * @param[in] sum_row_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
+ * @param[in] sum_row_stride_x (Optional) Stride of the source tensor in X dimension (in bytes)
+ * @param[in] sum_row_step_x (Optional) sum_row_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_row_stride_y (Optional) Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] sum_row_step_y (Optional) sum_row_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] sum_row_offset_first_element_in_bytes (Optional) The offset of the first element in the source tensor
+ * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Supported data type: same as @p src_ptr
+ * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: QASYMM8
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] result_multipliers_ptr (Optional) Pointer to the output multipliers vector for per-channel quantization. Supported data types: S32
+ * @param[in] result_multipliers_stride_x (Optional) Stride of the output multipliers vector in X dimension (in bytes)
+ * @param[in] result_multipliers_step_x (Optional) result_multipliers_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] result_multipliers_offset_first_element_in_bytes (Optional) The offset of the first element in the output multipliers vector
+ * @param[in] result_shifts_ptr (Optional) Pointer to the output shifts vector for per-channel quantization. Supported data types: S32
+ * @param[in] result_shifts_stride_x (Optional) Stride of the output shifts vector in X dimension (in bytes)
+ * @param[in] result_shifts_step_x (Optional) result_shifts_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] result_shifts_offset_first_element_in_bytes (Optional) The offset of the first element in the output shifts vector
*/
__kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result)
#if defined(A_OFFSET)
@@ -1581,7 +1613,13 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
#if defined(ADD_BIAS)
VECTOR_DECLARATION(biases),
#endif // defined(ADD_BIAS)
- TENSOR3D_DECLARATION(dst))
+ TENSOR3D_DECLARATION(dst)
+#if defined(PER_CHANNEL_QUANTIZATION)
+ ,
+ VECTOR_DECLARATION(result_multipliers),
+ VECTOR_DECLARATION(result_shifts)
+#endif // defined(PER_CHANNEL_QUANTIZATION)
+ )
{
const int x = get_global_id(0) * 4;
const int y = get_global_id(1);
@@ -1629,7 +1667,16 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
// -------------- OUTPUT STAGE
// Multiply by result_mult_int and shift
+#if defined(PER_CHANNEL_QUANTIZATION)
+ __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
+ __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
+ int4 result_multipliers_values = vload4(0, (__global int *)result_multipliers_addr);
+ int4 result_shifts_values = vload4(0, (__global int *)result_shifts_addr);
+
+ in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, 4);
+#else // !defined(PER_CHANNEL_QUANTIZATION)
in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, 4);
+#endif // defined(PER_CHANNEL_QUANTIZATION)
// Add the offset terms to GEMM's result
in_s32 += (int4)RESULT_OFFSET;
@@ -1646,7 +1693,8 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
// Store the result
vstore4(res, 0, dst_addr);
}
-#endif // defined(K_OFFSET) && defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT)
+#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT)
+
#endif // defined(K_OFFSET)
#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
@@ -1739,7 +1787,7 @@ __kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src),
#if defined(RESULT_OFFSET_AFTER_SHIFT) && defined(RESULT_FIXEDPOINT_MULTIPLIER) && defined(RESULT_SHIFT)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
*
- * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
+ * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8 value.
* The following computations will be performed by the kernel:
*
* -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
@@ -1825,7 +1873,7 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATIO
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QSYMM16
*
- * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QSYMM16 value.
+ * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QSYMM16 value.
* The following computations will be performed by the kernel:
*
* -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
@@ -1890,7 +1938,7 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DE
// Multiply by result_mult_int and shift
#if RESULT_SHIFT < 0
input_values = ASYMM_MULT(input_values * (1 << (-RESULT_SHIFT)), RESULT_FIXEDPOINT_MULTIPLIER, 4);
-#else // RESULT_SHIFT >= 0
+#else // RESULT_SHIFT >= 0
input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, 4);
#endif // RESULT_SHIFT < 0
@@ -1911,7 +1959,7 @@ __kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DE
#if defined(REAL_MULTIPLIER) && defined(OUTPUT_OFFSET)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
*
- * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
+ * This kernel takes a final int32 accumulator value (the output of matrix multiplication), and processes it to obtain the final QASYMM8 value.
* The following computations will be performed by the kernel:
*
* -# Compute fixed point multiplication between each entry of input by result_fixedpoint_multiplier
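
The PER_CHANNEL_QUANTIZATION branches added above select one multiplier and one shift per output channel instead of the compile-time RESULT_MULTIPLIER/RESULT_SHIFT pair. Below is a scalar C++ sketch of the fixed-point path; the helpers mirror gemmlowp's reference semantics, which the ASYMM_* macros implement in vectorized form, and the function names here are illustrative only.

#include <algorithm>
#include <cstdint>
#include <limits>

// Scalar equivalent of ASYMM_MULT(a, b): saturating rounding doubling
// high multiply, i.e. round((a * b) / 2^31) with saturation on INT32_MIN^2.
static int32_t saturating_rounding_doubling_high_mul(int32_t a, int32_t b)
{
    const bool    overflow     = (a == b) && (a == std::numeric_limits<int32_t>::min());
    const int64_t ab_64        = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    const int32_t nudge        = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30));
    const int32_t ab_x2_high32 = static_cast<int32_t>((ab_64 + nudge) / (1ll << 31));
    return overflow ? std::numeric_limits<int32_t>::max() : ab_x2_high32;
}

// Scalar equivalent of ASYMM_ROUNDING_DIVIDE_BY_POW2: round-to-nearest
// arithmetic right shift.
static int32_t rounding_divide_by_pow2(int32_t x, int exponent)
{
    const int32_t mask      = (int32_t(1) << exponent) - 1;
    const int32_t remainder = x & mask;
    const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

// One output element of the fixed-point kernel; channel index c selects the
// per-channel multiplier/shift (the non-per-channel build uses the
// compile-time RESULT_MULTIPLIER/RESULT_SHIFT instead).
static uint8_t quantize_down_fixedpoint(int32_t acc, int c,
                                        const int32_t *result_multipliers,
                                        const int32_t *result_shifts,
                                        int32_t result_offset)
{
    acc = saturating_rounding_doubling_high_mul(acc, result_multipliers[c]);
    acc = rounding_divide_by_pow2(acc, result_shifts[c]);
    acc += result_offset;                                         // RESULT_OFFSET, added after the multiply in this kernel
    return static_cast<uint8_t>(std::min(255, std::max(0, acc))); // saturate to uint8
}
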
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
index 0b663e8498..f2119728c9 100644
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
@@ -48,16 +48,17 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, C
ARM_COMPUTE_RETURN_ERROR_ON(input == output);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input,
1,
- DataType::U8, DataType::S8, DataType::S16,
+ DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
DataType::U16, DataType::U32, DataType::S32, DataType::F16,
DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
1,
- DataType::U8, DataType::S8, DataType::S16,
+ DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
DataType::U16, DataType::U32, DataType::S32, DataType::F16,
DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == output->data_type(), "Input and output data types must be different");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer inputs");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
// Validate in case of configured output
@@ -94,13 +95,14 @@ void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *out
// Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined
build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || is_data_type_float(output->info()->data_type()), "-DIS_DATA_TYPE_FLOAT");
+ build_opts.add_option_if(is_data_type_quantized(input->info()->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
// Create kernel
const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
// Set shift arg
- unsigned int idx = 2 * num_arguments_per_3D_tensor(); //Skip the input and output parameters
+ unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
_kernel.setArg(idx++, shift);
// Configure kernel
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
index 4bcfa82ca7..09caeeea55 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.cpp
@@ -46,8 +46,6 @@ namespace arm_compute
{
using namespace misc::shape_calculator;
-class Coordinates;
-
namespace
{
using ElementsProcessed = Steps;
@@ -56,7 +54,6 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
const GEMMReshapeInfo &gemm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
index 27d5b28943..779f96e7cf 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.cpp
@@ -54,7 +54,6 @@ Status validate_arguments(const ITensorInfo *input0, const ITensorInfo *input1,
const GEMMReshapeInfo &gemm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input0, input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3");
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
index 1852262337..2ebd76e1bf 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -37,17 +37,12 @@
#include <cstddef>
#include <cstdint>
-using namespace arm_compute;
-
namespace arm_compute
{
-class Coordinates;
-} // namespace arm_compute
-
namespace
{
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output,
- int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage)
+ int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
@@ -61,6 +56,16 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vecto
ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
}
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
+ if(output_stage.is_quantized_per_channel)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_shifts->dimension(0));
+ ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_multipliers->dimension(0));
+ }
+
// If a_offset == 0, vector_sum_col can be a nullptr
if(a_offset != 0)
{
@@ -109,11 +114,14 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vecto
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, output);
}
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(),
+ "per channel quantization info is incorrect");
+
return Status{};
}
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias, ITensorInfo *output,
- int32_t a_offset, int32_t b_offset)
+ int32_t a_offset, int32_t b_offset, ITensorInfo *output_multipliers, ITensorInfo *output_shifts)
{
constexpr unsigned int num_elems_processed_per_iteration = 4;
bool window_changed = false;
@@ -147,36 +155,55 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result,
window_changed = window_changed || update_window_and_padding(win, bias_access);
}
+ if(output_multipliers->dimension(0) > 1)
+ {
+ AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration);
+ window_changed = window_changed || update_window_and_padding(win, output_multipliers_access, output_shifts_access);
+ }
+
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
}
} // namespace
CLGEMMLowpOffsetContributionOutputStageKernel::CLGEMMLowpOffsetContributionOutputStageKernel()
- : _mm_result(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _output(nullptr)
+ : _mm_result(nullptr),
+ _vector_sum_col(nullptr),
+ _vector_sum_row(nullptr),
+ _bias(nullptr),
+ _output(nullptr),
+ _output_multipliers(nullptr),
+ _output_shifts(nullptr),
+ _is_quantized_per_channel(false)
{
}
void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *mm_result, const ICLTensor *vector_sum_col, const ICLTensor *vector_sum_row, const ICLTensor *bias, ICLTensor *output,
- int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage)
+ int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
+ const ICLTensor *output_multipliers, const ICLTensor *output_shifts)
{
// Perform validate step
- ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, output, output_multipliers, output_shifts);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result->info(),
vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
bias != nullptr ? bias->info() : nullptr,
output->info(),
- a_offset, b_offset, output_stage)); // NOLINT
+ a_offset, b_offset, output_stage,
+ output_multipliers->info(), output_shifts->info())); // NOLINT
const int min = output_stage.gemmlowp_min_bound;
const int max = output_stage.gemmlowp_max_bound;
- _vector_sum_col = vector_sum_col;
- _vector_sum_row = vector_sum_row;
- _mm_result = mm_result;
- _bias = bias;
- _output = output;
+ _vector_sum_col = vector_sum_col;
+ _vector_sum_row = vector_sum_row;
+ _mm_result = mm_result;
+ _bias = bias;
+ _output = output;
+ _output_multipliers = output_multipliers;
+ _output_shifts = output_shifts;
+ _is_quantized_per_channel = output_stage.is_quantized_per_channel;
// Check if input is a 3D reinterpretation
const bool reinterpret_as_3d = vector_sum_row != nullptr
@@ -199,8 +226,9 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *m
build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->info()->dimension(2)));
build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
- build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multiplier));
- build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shift));
+ build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
+ build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
+ build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
build_opts.add_option_if((min != 0) && (min != max), "-DMIN_BOUND=" + support::cpp11::to_string(min));
build_opts.add_option_if((max != 255) && (min != max), "-DMAX_BOUND=" + support::cpp11::to_string(max));
@@ -225,7 +253,8 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *m
vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
bias != nullptr ? bias->info() : nullptr,
output->info(),
- a_offset, b_offset); // NOLINT
+ a_offset, b_offset,
+ output_multipliers->info(), output_shifts->info()); // NOLINT
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure_internal(win_config.second);
@@ -239,16 +268,17 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const ICLTensor *m
}
Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
- const ITensorInfo *output,
- int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage)
+ const ITensorInfo *output, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
+ const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(),
vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
bias != nullptr ? bias->clone().get() : nullptr,
output->clone().get(),
- a_offset, b_offset)
+ a_offset, b_offset,
+ output_multipliers->clone().get(), output_shifts->clone().get())
.first); // NOLINT
return Status{};
@@ -285,7 +315,10 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::run(const Window &window, cl
add_2D_tensor_argument_if((_vector_sum_row != nullptr), idx, _vector_sum_row, win_vector_sum_row);
add_1D_tensor_argument_if((_bias != nullptr), idx, _bias, biases_slice);
add_3D_tensor_argument(idx, _output, slice);
+ add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_multipliers, biases_slice);
+ add_1D_tensor_argument_if(_is_quantized_per_channel, idx, _output_shifts, biases_slice);
enqueue(queue, *this, slice, lws_hint());
}
while(collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
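
For context, the correction these offset-contribution kernels apply comes from expanding the quantized dot product for one output element: sum_i (a_i + a_off)(b_i + b_off) = sum_i a_i*b_i + a_off * sum_i b_i + b_off * sum_i a_i + k * a_off * b_off. A hedged C++ sketch of the per-element correction (sign conventions aside, since the function receives the offsets already chosen at configure time; a_offset * b_offset * k is baked in at compile time as -DK_OFFSET):

#include <cstdint>

// Correction folded into the raw int32 GEMM result for output element (x, y).
// sum_b_col: column sum of matrix B at x (from the matrix B reduction kernel)
// sum_a_row: row sum of matrix A at y (from the matrix A reduction kernel)
// k_offset : a_offset * b_offset * k, passed at compile time as -DK_OFFSET
static int32_t offset_contribution(int32_t a_offset, int32_t b_offset,
                                   int32_t sum_b_col, int32_t sum_a_row,
                                   int32_t k_offset, int32_t bias)
{
    return a_offset * sum_b_col + b_offset * sum_a_row + k_offset + bias;
}
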
diff --git a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
index 6f6019d26a..3d681dd13e 100644
--- a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp
@@ -55,9 +55,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
ARM_COMPUTE_RETURN_ERROR_ON((rhs_info.k0 == 1) && (rhs_info.transpose));
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::U8, DataType::S8,
- DataType::U16, DataType::S16, DataType::U32, DataType::S32,
- DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
if(output->total_size() != 0)
{
diff --git a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
index 9330b3b8a1..e325feac1f 100644
--- a/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
+++ b/src/core/CL/kernels/CLWeightsReshapeKernel.cpp
@@ -33,7 +33,8 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-using namespace arm_compute;
+namespace arm_compute
+{
using namespace arm_compute::misc::shape_calculator;
namespace
@@ -42,7 +43,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, c
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
ARM_COMPUTE_RETURN_ERROR_ON(num_groups == 0);
ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::NHWC && num_groups > 1);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4 && num_groups > 1);
@@ -50,7 +51,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *biases, c
if(biases != nullptr)
{
- ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(input->data_type()));
+ ARM_COMPUTE_RETURN_ERROR_ON(!is_data_type_float(input->data_type()));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases);
ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 4) && (biases->num_dimensions() != 1));
ARM_COMPUTE_RETURN_ERROR_ON((input->num_dimensions() == 5) && (biases->num_dimensions() != 2));
@@ -160,3 +161,4 @@ void CLWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
}
while(window.slide_window_slice_4D(in_slice) && out_window.slide_window_slice_2D(out_slice));
}
+} // namespace arm_compute
diff --git a/src/core/utils/quantization/AsymmHelpers.cpp b/src/core/utils/quantization/AsymmHelpers.cpp
index 386d75eca2..7e22a814b5 100644
--- a/src/core/utils/quantization/AsymmHelpers.cpp
+++ b/src/core/utils/quantization/AsymmHelpers.cpp
@@ -173,14 +173,18 @@ std::pair<int, int> get_min_max_values_from_quantized_data_type(DataType data_ty
}
return std::make_pair(min_quant_val, max_quant_val);
}
-void compute_quantized_multipliers_and_shifts(const ITensor *input, const ITensor *weights, const ITensor *output, int32_t *output_multipliers_ptr, int32_t *output_shifts_ptr)
+void compute_quantized_multipliers_and_shifts(const ITensorInfo *input,
+ const ITensorInfo *weights,
+ const ITensorInfo *output,
+ unsigned int idx_ofms,
+ int32_t *output_multipliers_ptr,
+ int32_t *output_shifts_ptr)
{
- const unsigned int idx_c = get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::CHANNEL);
- const unsigned int num_filters = is_data_type_quantized_per_channel(weights->info()->data_type()) ? weights->info()->dimension(idx_c) : 1;
+ const unsigned int num_filters = is_data_type_quantized_per_channel(weights->data_type()) ? weights->dimension(idx_ofms) : 1;
- const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
- const QuantizationInfo wq_info = weights->info()->quantization_info();
- const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
+ const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
+ const QuantizationInfo wq_info = weights->quantization_info();
+ const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
for(unsigned int i = 0; i < num_filters; ++i)
{
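
Each entry written through output_multipliers_ptr/output_shifts_ptr decomposes one real rescale factor, (input_scale * weights_scale[i]) / output_scale, into an int32 fixed-point multiplier and a right shift. A simplified sketch of that decomposition, mirroring what the library's calculate_quantized_multiplier_less_than_one does but without its error handling:

#include <cmath>
#include <cstdint>

// Decompose real_multiplier in (0, 1) as quant_multiplier * 2^(-right_shift),
// with quant_multiplier a Q0.31 fixed-point value in [2^30, 2^31).
static void quantize_multiplier_less_than_one(double real_multiplier,
                                              int32_t *quant_multiplier,
                                              int32_t *right_shift)
{
    int          exponent = 0;
    const double q        = std::frexp(real_multiplier, &exponent); // q in [0.5, 1)
    int64_t      q_fixed  = static_cast<int64_t>(std::round(q * (1ll << 31)));
    if(q_fixed == (1ll << 31)) // rounding carried q up to 1.0: renormalize
    {
        q_fixed /= 2;
        ++exponent;
    }
    *quant_multiplier = static_cast<int32_t>(q_fixed);
    *right_shift      = -exponent; // exponent <= 0 for inputs below 1
}
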
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index cdf3a95568..e717f793fd 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -337,9 +337,11 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
{
_output_multipliers.map();
_output_shifts.map();
- quantization::compute_quantized_multipliers_and_shifts(_input,
- _original_weights,
- _output,
+ const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
+ quantization::compute_quantized_multipliers_and_shifts(_input->info(),
+ _original_weights->info(),
+ _output->info(),
+ idx_ofms,
reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
_output_multipliers.unmap();
@@ -533,9 +535,11 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::prepar
{
_output_multipliers.map();
_output_shifts.map();
- quantization::compute_quantized_multipliers_and_shifts(_input,
- _original_weights,
- _output,
+ const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
+ quantization::compute_quantized_multipliers_and_shifts(_input->info(),
+ _original_weights->info(),
+ _output->info(),
+ idx_ofms,
reinterpret_cast<int32_t *>(_output_multipliers.ptr_to_element(Coordinates(0))),
reinterpret_cast<int32_t *>(_output_shifts.ptr_to_element(Coordinates(0))));
_output_multipliers.unmap();
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 5bcf38d1c4..a8167ce8f7 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -68,6 +68,8 @@ Status construct_gemmlowp_output_stage(const ITensorInfo &input, const ITensorIn
gemmlowp_output_stage.gemmlowp_shift = output_shift;
gemmlowp_output_stage.gemmlowp_min_bound = 0;
gemmlowp_output_stage.gemmlowp_max_bound = 255;
+ gemmlowp_output_stage.gemmlowp_multipliers.push_back(output_multiplier);
+ gemmlowp_output_stage.gemmlowp_shifts.push_back(output_shift);
}
return Status{};
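
A sketch of the per-tensor output stage this hunk produces: the legacy scalar fields stay populated while the new vectors carry the same single entry, so downstream code that indexes gemmlowp_multipliers[0] and gemmlowp_shifts[0] works for both per-tensor and per-channel paths (the helper name and header below are assumptions for illustration):

#include "arm_compute/core/Types.h" // GEMMLowpOutputStageInfo (assumed location)

// Hypothetical helper mirroring construct_gemmlowp_output_stage's per-tensor
// branch after this change.
arm_compute::GEMMLowpOutputStageInfo make_per_tensor_stage(int32_t output_multiplier, int32_t output_shift)
{
    arm_compute::GEMMLowpOutputStageInfo stage{};
    stage.type                     = arm_compute::GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    stage.gemmlowp_multiplier      = output_multiplier;     // legacy scalar fields
    stage.gemmlowp_shift           = output_shift;
    stage.gemmlowp_multipliers     = { output_multiplier }; // single entry: per-tensor
    stage.gemmlowp_shifts          = { output_shift };
    stage.is_quantized_per_channel = false;
    stage.gemmlowp_min_bound       = 0;   // QASYMM8 bounds, as in the hunk above
    stage.gemmlowp_max_bound       = 255;
    return stage;
}
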
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 831f108b85..d322723150 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -66,13 +66,14 @@ void CLConvolutionLayerReshapeWeights::configure(const ICLTensor *weights, const
Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, unsigned int num_groups)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(weights);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weights, 1, DataType::QASYMM8, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
if(biases != nullptr)
{
const int idx_kernels = get_data_layout_dimension_index(weights->data_layout(), DataLayoutDimension::BATCHES);
- ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(weights->data_type()));
+ ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized(weights->data_type()));
+
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(idx_kernels));
ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
@@ -81,7 +82,6 @@ Status CLConvolutionLayerReshapeWeights::validate(const ITensorInfo *weights, co
if((output != nullptr) && (output->total_size() != 0))
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, output);
-
CLWeightsReshapeKernel::validate(weights, biases, output, num_groups);
}
@@ -201,9 +201,9 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
const unsigned int kernel_width = weights->info()->dimension(idx_width);
const unsigned int kernel_height = weights->info()->dimension(idx_height);
+ const unsigned int num_kernels = weights->info()->dimension(idx_kernels);
const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
_is_prepared = weights_info.retain_internal_weights();
@@ -237,7 +237,7 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
conv_info,
dilation);
- unsigned int mat_weights_cols = weights->info()->dimension(idx_kernels) / num_groups;
+ unsigned int mat_weights_cols = num_kernels / num_groups;
const ICLTensor *biases_to_use = biases;
bool append_bias = false;
@@ -310,20 +310,28 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
}
GEMMLowpOutputStageInfo gemmlowp_output_stage;
- gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- gemmlowp_output_stage.gemmlowp_offset = 0;
- gemmlowp_output_stage.gemmlowp_multiplier = 0;
- gemmlowp_output_stage.gemmlowp_shift = 0;
+ gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ gemmlowp_output_stage.gemmlowp_offset = 0;
// Configure output stage for quantized case
if(_is_quantized)
{
- const auto output_quant_info = (output->info()->total_size() == 0) ? iq_info : oq_info;
-
- const float multiplier = (iq_info.scale * wq_info.scale) / output_quant_info.scale;
- int output_multiplier = 0;
- int output_shift = 0;
- quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
+ const auto output_quant_info = (output->info()->total_size() == 0) ? iq_info : oq_info;
+ const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type());
+ const unsigned int num_filters = (is_quantized_per_channel) ? num_kernels : 1;
+
+ gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;
+
+ gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
+ gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
+ quantization::compute_quantized_multipliers_and_shifts(input->info(),
+ weights->info(),
+ output->info(),
+ idx_kernels,
+ gemmlowp_output_stage.gemmlowp_multipliers.data(),
+ gemmlowp_output_stage.gemmlowp_shifts.data());
+ gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
+ gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
int min_activation = 0;
int max_activation = 0;
@@ -350,11 +358,9 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
}
// Set the GEMMLowp output stage info
- gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
- gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
- gemmlowp_output_stage.gemmlowp_shift = output_shift;
- gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
- gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
+ gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
+ gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
+ gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
}
// Configure and tune GEMM
@@ -396,8 +402,17 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QSYMM8_PER_CHANNEL, DataType::F16, DataType::F32);
+ const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());
+
+ if(is_quantized_per_channel)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() != DataType::QASYMM8, "Input data type must be QASYMM8 when weights are QSYMM8_PER_CHANNEL");
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
+ }
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);
ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_layout() != DataLayout::NCHW), "Grouping (num_groups != 1) with NHWC data layout is not supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1) && (input->data_type() == DataType::QASYMM8), "Grouping (num_groups != 1) is not supported with QASYMM8");
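To make the accepted combinations concrete, a hypothetical descriptor pair that passes the checks above (shapes, scales and offsets are illustrative only; NCHW layout assumed):

    #include "arm_compute/core/TensorInfo.h"
    #include <vector>

    using namespace arm_compute;

    // QASYMM8 input paired with QSYMM8_PER_CHANNEL weights: one scale per kernel.
    const std::vector<float> kernel_scales{ 0.010f, 0.021f, 0.017f };
    const TensorInfo weights(TensorShape(3U, 3U, 32U, 3U), 1, DataType::QSYMM8_PER_CHANNEL, QuantizationInfo(kernel_scales));
    const TensorInfo input(TensorShape(224U, 224U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10));
    // Any other mixed pairing falls through to the mismatching-data-types check.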
@@ -412,6 +427,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const unsigned int kernel_width = weights->dimension(idx_width);
const unsigned int kernel_height = weights->dimension(idx_height);
+ const unsigned int num_kernels = weights->dimension(idx_kernels);
TensorInfo im2col_reshaped_info{};
TensorInfo info_gemm{};
@@ -419,15 +435,10 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
const ITensorInfo *gemm_input_to_use = input;
const ITensorInfo *gemm_output_to_use = output;
const ITensorInfo *weights_to_use = weights;
-
- const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
- const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
- const bool skip_col2im = data_layout == DataLayout::NHWC;
- bool fuse_activation = true;
-
- const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
- const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
+ const bool is_quantized = is_data_type_quantized_asymmetric(data_type);
+ const bool skip_im2col = (data_layout == DataLayout::NHWC && kernel_width == 1 && kernel_height == 1 && conv_info.stride().first == 1 && conv_info.stride().second == 1);
+ const bool skip_col2im = data_layout == DataLayout::NHWC;
+ bool fuse_activation = true;
ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(idx_channel) * num_groups) != input->dimension(idx_channel));
ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 4);
@@ -463,7 +474,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
conv_info,
dilation);
- unsigned int mat_weights_cols = weights->dimension(idx_kernels) / num_groups;
+ unsigned int mat_weights_cols = num_kernels / num_groups;
const ITensorInfo *biases_to_use = biases;
bool append_bias = false;
@@ -514,20 +525,27 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
}
GEMMLowpOutputStageInfo gemmlowp_output_stage;
- gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- gemmlowp_output_stage.gemmlowp_offset = 0;
- gemmlowp_output_stage.gemmlowp_multiplier = 0;
- gemmlowp_output_stage.gemmlowp_shift = 0;
+ gemmlowp_output_stage.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ gemmlowp_output_stage.gemmlowp_offset = 0;
+ gemmlowp_output_stage.is_quantized_per_channel = is_quantized_per_channel;
if(is_quantized)
{
- const auto output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
-
- const float multiplier = (iq_info.scale * wq_info.scale) / output_quant_info.scale;
- int output_multiplier = 0;
- int output_shift = 0;
-
- ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift));
+ const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = output->quantization_info().uniform();
+ const auto output_quant_info = (output->total_size() == 0) ? iq_info : oq_info;
+ const unsigned int num_filters = (is_quantized_per_channel) ? num_kernels : 1;
+
+ gemmlowp_output_stage.gemmlowp_multipliers.resize(num_filters);
+ gemmlowp_output_stage.gemmlowp_shifts.resize(num_filters);
+ quantization::compute_quantized_multipliers_and_shifts(input,
+ weights,
+ output,
+ idx_kernels,
+ gemmlowp_output_stage.gemmlowp_multipliers.data(),
+ gemmlowp_output_stage.gemmlowp_shifts.data());
+ gemmlowp_output_stage.gemmlowp_multiplier = gemmlowp_output_stage.gemmlowp_multipliers[0];
+ gemmlowp_output_stage.gemmlowp_shift = gemmlowp_output_stage.gemmlowp_shifts[0];
int min_activation = 0;
int max_activation = 0;
@@ -554,11 +572,9 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
}
// Set the GEMMLowp output stage info
- gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
- gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
- gemmlowp_output_stage.gemmlowp_shift = output_shift;
- gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
- gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
+ gemmlowp_output_stage.gemmlowp_offset = output_quant_info.offset;
+ gemmlowp_output_stage.gemmlowp_min_bound = min_activation;
+ gemmlowp_output_stage.gemmlowp_max_bound = max_activation;
}
// In case of NHWC, we need to run GEMM3D (gemm_3d_depth != 0) in order to avoid reshaping the output matrix
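Each real-valued rescale factor is stored as an integer multiplier/shift pair. A simplified sketch of the decomposition performed by calculate_quantized_multiplier_less_than_one, assuming 0 < real_multiplier < 1 (the in-tree helper additionally validates its arguments):

    #include <cmath>
    #include <cstdint>

    // Decompose real_multiplier into a Q0.31 fixed-point multiplier and a right
    // shift such that real_multiplier ~= quant_multiplier * 2^-(31 + right_shift).
    void decompose(float real_multiplier, int32_t *quant_multiplier, int *right_shift)
    {
        int shift_exp = 0;
        const double q = std::frexp(real_multiplier, &shift_exp); // q in [0.5, 1)
        *right_shift  = -shift_exp;                               // >= 0 since real_multiplier < 1
        auto q_fixed  = static_cast<int64_t>(std::round(q * (1ll << 31)));
        if(q_fixed == (1ll << 31))                                // rounding overflow: renormalise
        {
            q_fixed /= 2;
            --*right_shift;
        }
        *quant_multiplier = static_cast<int32_t>(q_fixed);
    }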
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 0286cb3d6d..4c0a521de8 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -32,6 +32,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
namespace arm_compute
@@ -49,6 +50,7 @@ inline bool is_gemm_reshaped(bool reshape_b_only_on_first_run, GPUTarget gpu_tar
CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)),
+ _weights_to_qasymm8(),
_mm_midgard_kernel(),
_mm_native_kernel(),
_mm_reshaped_only_rhs_kernel(),
@@ -57,18 +59,24 @@ CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemo
_mtx_b_reduction_kernel(),
_offset_contribution_kernel(),
_offset_contribution_output_stage_kernel(),
+ _qasymm8_weights(),
_vector_sum_col(),
_vector_sum_row(),
_tmp_b(),
_mm_result_s32(),
+ _gemm_output_stage_multipliers(),
+ _gemm_output_stage_shifts(),
+ _matrix_a(nullptr),
_original_b(nullptr),
+ _output(nullptr),
_a_offset(0),
_b_offset(0),
_is_gemm_reshaped(true),
_is_midgard(false),
_reshape_b_only_on_first_run(false),
_is_prepared(false),
- _fuse_output_stage(false)
+ _fuse_output_stage(false),
+ _convert_to_qasymm8(false)
{
}
@@ -81,7 +89,12 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
_original_b = b;
_reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
_a_offset = a->info()->quantization_info().uniform().offset;
- _b_offset = b->info()->quantization_info().uniform().offset;
+ _matrix_a = a;
+ _output = output;
+
+ _convert_to_qasymm8 = is_data_type_quantized_per_channel(b->info()->data_type()) && is_data_type_quantized_symmetric(b->info()->data_type())
+ && is_data_type_quantized_asymmetric(a->info()->data_type());
+ _b_offset = _convert_to_qasymm8 ? -128 : b->info()->quantization_info().uniform().offset;
// Get the GPU target
const GPUTarget gpu_target = CLScheduler::get().target();
@@ -91,8 +104,6 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
_mm_native_kernel.set_target(gpu_target);
_mm_reshaped_only_rhs_kernel.set_target(gpu_target);
- const ICLTensor *matrix_a = a;
- const ICLTensor *matrix_b = b;
GEMMRHSMatrixInfo rhs_info;
GEMMLHSMatrixInfo lhs_info;
@@ -110,6 +121,16 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
_is_gemm_reshaped = is_gemm_reshaped(_reshape_b_only_on_first_run, gpu_target);
_is_midgard = gpu_target == GPUTarget::MIDGARD;
+ if(_convert_to_qasymm8)
+ {
+ // Set data type for converted weights
+ TensorInfo weights_info(*b->info());
+ weights_info.set_data_type(DataType::QASYMM8);
+ _qasymm8_weights.allocator()->init(weights_info);
+ _weights_to_qasymm8.configure(b, &_qasymm8_weights, ConvertPolicy::WRAP, 0);
+ }
+
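This conversion lets per-channel weights reuse the existing QASYMM8 kernels: the signed symmetric values are shifted into the unsigned range and the shift is compensated through the B offset. Conceptually (a sketch; the exact arithmetic lives in the depth-convert kernel, and the -128 stored above follows the sign convention expected by the offset-contribution kernels):

    // Per-element remapping, QSYMM8(_PER_CHANNEL) -> QASYMM8 (sketch).
    // The scale is unchanged; the +128 shift is what _b_offset compensates for.
    uint8_t to_qasymm8(int8_t v)
    {
        return static_cast<uint8_t>(static_cast<int32_t>(v) + 128); // [-128, 127] -> [0, 255]
    }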
+ const ICLTensor *matrix_b = _convert_to_qasymm8 ? &_qasymm8_weights : b;
if(_is_gemm_reshaped)
{
matrix_b = &_tmp_b;
@@ -123,7 +144,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
std::tie(lhs_info, rhs_info) = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
// Configure reshape RHS kernel
- _mtx_b_reshape_kernel.configure(b, &_tmp_b, rhs_info);
+ _mtx_b_reshape_kernel.configure(_convert_to_qasymm8 ? &_qasymm8_weights : b, &_tmp_b, rhs_info);
}
// Initialize matrix B reduction kernel only if _a_offset is not equal to 0
@@ -137,7 +158,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
}
// Configure Matrix B reduction kernel
- _mtx_b_reduction_kernel.configure(b, &_vector_sum_col);
+ _mtx_b_reduction_kernel.configure(_convert_to_qasymm8 ? &_qasymm8_weights : b, &_vector_sum_col);
}
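The two reduction kernels come straight from the zero-point expansion of the quantized product; each term maps to one kernel or to a scalar folded into the offset-contribution step (K is the accumulation depth):

    // sum_k (a[i][k] - a_off) * (b[k][j] - b_off)
    //   =  sum_k a[i][k] * b[k][j]    // raw S32 GEMM result
    //   -  a_off * sum_k b[k][j]      // vector_sum_col, needed only if a_off != 0
    //   -  b_off * sum_k a[i][k]      // vector_sum_row, needed only if b_off != 0
    //   +  K * a_off * b_off          // scalar, folded into the contribution kernel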
// Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
@@ -161,14 +182,14 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
if(_is_gemm_reshaped)
{
// Configure and tune matrix multiply kernel
- _mm_reshaped_only_rhs_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+ _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
}
else
{
if(_is_midgard)
{
// Configure matrix multiply kernel
- _mm_midgard_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+ _mm_midgard_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
}
else
{
@@ -176,13 +197,27 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
// Configure matrix multiply kernel
- _mm_native_kernel.configure(matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+ _mm_native_kernel.configure(_matrix_a, matrix_b, &_mm_result_s32, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
}
}
-
// Configure offset contribution kernel
+ const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;
+
+ _gemm_output_stage_multipliers.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
+ _gemm_output_stage_shifts.allocator()->init(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
+
_offset_contribution_output_stage_kernel.configure(&_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col, _b_offset == 0 ? nullptr : &_vector_sum_row, c, output, a->info()->dimension(0),
- _a_offset, _b_offset, gemm_info.gemmlowp_output_stage());
+ _a_offset, _b_offset, gemm_info.gemmlowp_output_stage(), &_gemm_output_stage_multipliers, &_gemm_output_stage_shifts);
+
+ _gemm_output_stage_multipliers.allocator()->allocate();
+ _gemm_output_stage_shifts.allocator()->allocate();
+ // Compute GEMM output multipliers and shifts for output stage
+ _gemm_output_stage_multipliers.map();
+ _gemm_output_stage_shifts.map();
+ std::memcpy(_gemm_output_stage_multipliers.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.data(), num_filters * sizeof(int32_t));
+ std::memcpy(_gemm_output_stage_shifts.ptr_to_element(Coordinates(0)), gemm_info.gemmlowp_output_stage().gemmlowp_shifts.data(), num_filters * sizeof(int32_t));
+ _gemm_output_stage_multipliers.unmap();
+ _gemm_output_stage_shifts.unmap();
_mm_result_s32.allocator()->allocate();
}
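The multiplier/shift tensors staged above drive the fused QUANTIZE_DOWN_FIXEDPOINT stage, which the offset-contribution kernel applies per output channel. A minimal sketch of that arithmetic in the gemmlowp scheme (simplified: the real kernel also applies the negative-product rounding nudge and saturates the INT32_MIN corner case):

    #include <algorithm>
    #include <cstdint>

    // Requantize one S32 accumulator from output channel c down to QASYMM8.
    uint8_t requantize(int32_t acc, int32_t multiplier, int32_t shift, int32_t offset, int32_t min_bound, int32_t max_bound)
    {
        // Fixed-point multiply: high 32 bits of the 64-bit product, rounded
        const int64_t prod = static_cast<int64_t>(acc) * static_cast<int64_t>(multiplier);
        int32_t v = static_cast<int32_t>((prod + (1ll << 30)) >> 31);
        if(shift > 0)
        {
            v = (v + (1 << (shift - 1))) >> shift; // rounding right shift
        }
        v += offset; // gemmlowp_offset, i.e. the output zero point
        return static_cast<uint8_t>(std::min(max_bound, std::max(min_bound, v)));
    }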
@@ -191,14 +226,14 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
if(_is_gemm_reshaped)
{
// Configure and tune matrix multiply kernel
- _mm_reshaped_only_rhs_kernel.configure(matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+ _mm_reshaped_only_rhs_kernel.configure(_matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
}
else
{
if(_is_midgard)
{
// Configure matrix multiply kernel
- _mm_midgard_kernel.configure(matrix_a, matrix_b, output, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+ _mm_midgard_kernel.configure(_matrix_a, matrix_b, output, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
}
else
{
@@ -206,7 +241,7 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
std::tie(lhs_info, rhs_info) = CLGEMMNativeKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
// Configure matrix multiply kernel
- _mm_native_kernel.configure(matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
+ _mm_native_kernel.configure(_matrix_a, matrix_b, output, lhs_info, rhs_info, GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d));
}
}
@@ -237,7 +272,15 @@ void CLGEMMLowpMatrixMultiplyCore::configure(const ICLTensor *a, const ICLTensor
Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
+ if(b->data_type() == DataType::QSYMM8_PER_CHANNEL)
+ {
+ // DataType::QSYMM8_PER_CHANNEL is supported only for the weights (matrix B)
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() != DataType::QASYMM8, "Matrix A has to be QASYMM8 when Matrix B is QSYMM8_PER_CHANNEL");
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
+ }
ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_a_reshaped(), "Matrix A already reshaped is not supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(gemm_info.is_b_reshaped(), "Matrix B already reshaped is not supported");
@@ -245,7 +288,6 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
int32_t b_offset = b->quantization_info().uniform().offset;
const ITensorInfo *matrix_a_info = a;
- const ITensorInfo *matrix_b_info = b;
TensorInfo tmp_b_info{};
GEMMRHSMatrixInfo rhs_info;
@@ -266,6 +308,16 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d);
+ bool convert_to_qasymm8 = is_data_type_quantized_per_channel(b->data_type()) && is_data_type_quantized_symmetric(b->data_type())
+ && is_data_type_quantized_asymmetric(a->data_type());
+ TensorInfo weights_info(*b);
+ if(convert_to_qasymm8)
+ {
+ b_offset = -128;
+ weights_info.set_data_type(DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConvertLayerKernel::validate(b, &weights_info, ConvertPolicy::WRAP, 0));
+ }
+ const ITensorInfo *matrix_b_info = &weights_info;
if(reshape_matrix_b)
{
matrix_b_info = &tmp_b_info;
@@ -274,8 +326,8 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
std::tie(lhs_info, rhs_info) = CLGEMMReshapedOnlyRHSKernelConfigurationFactory::create(gpu_target)->configure(m, n, k, batch_size, DataType::QASYMM8);
// Validate reshape RHS kernel
- auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info)));
- ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(b, &tmp_b_info, rhs_info));
+ auto_init_if_empty(tmp_b_info, weights_info.clone()->set_tensor_shape(compute_rhs_reshaped_shape(weights_info, rhs_info)));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMReshapeRHSMatrixKernel::validate(&weights_info, &tmp_b_info, rhs_info));
}
TensorInfo info_vector_sum_col{};
@@ -284,10 +336,10 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
// Validate matrix B reduction kernel only if _a_offset is not equal to 0
if(a_offset != 0)
{
- info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
+ info_vector_sum_col = TensorInfo(compute_reductionA_shape(weights_info), 1, DataType::S32);
// Configure Matrix B reduction kernel
- ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(b, &info_vector_sum_col));
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixBReductionKernel::validate(&weights_info, &info_vector_sum_col));
}
// Validate Matrix A reduction kernel only if _b_offset is not equal to 0
@@ -332,13 +384,19 @@ Status CLGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
}
// Validate offset contribution kernel
+ const size_t num_filters = (gemm_info.gemmlowp_output_stage().is_quantized_per_channel) ? gemm_info.gemmlowp_output_stage().gemmlowp_multipliers.size() : 1;
+
+ const TensorInfo gemm_output_stage_multipliers_shifts_info(TensorInfo(TensorShape(num_filters), 1, DataType::S32));
+
ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOffsetContributionOutputStageKernel::validate(&mm_result_s32_info,
a_offset == 0 ? nullptr : &info_vector_sum_col,
b_offset == 0 ? nullptr : &info_vector_sum_row,
c,
output,
a_offset, b_offset,
- gemm_info.gemmlowp_output_stage()));
+ gemm_info.gemmlowp_output_stage(),
+ &gemm_output_stage_multipliers_shifts_info,
+ &gemm_output_stage_multipliers_shifts_info));
}
else
{
@@ -438,6 +496,12 @@ void CLGEMMLowpMatrixMultiplyCore::prepare()
{
if(!_is_prepared)
{
+ if(_convert_to_qasymm8)
+ {
+ _qasymm8_weights.allocator()->allocate();
+ CLScheduler::get().enqueue(_weights_to_qasymm8, false);
+ }
+
if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
{
ARM_COMPUTE_ERROR_ON(!_original_b->is_used());
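End to end, the per-channel path added in this patch is driven like any other GEMMLowp configuration. A hypothetical call sequence (shapes and scales are illustrative; gemm_info is assumed to carry a QUANTIZE_DOWN_FIXEDPOINT output stage with per-channel multipliers/shifts, as set up by CLGEMMConvolutionLayer above):

    // B is K x N = 64 x 32, stored as TensorShape(N, K); one scale per column of B.
    const std::vector<float> per_channel_scales(32, 0.02f);

    CLTensor a, b, dst;
    a.allocator()->init(TensorInfo(TensorShape(64U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10)));
    b.allocator()->init(TensorInfo(TensorShape(32U, 64U), 1, DataType::QSYMM8_PER_CHANNEL, QuantizationInfo(per_channel_scales)));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.1f, 128)));

    CLGEMMLowpMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, nullptr, &dst, gemm_info); // weights converted lazily in prepare()
    // ... allocate and fill a and b ...
    gemmlowp.run(); // first run triggers prepare(): QASYMM8 conversion + optional RHS reshape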