author    Michele Di Giorgio <michele.digiorgio@arm.com>  2020-10-21 11:36:21 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>     2020-10-21 17:36:37 +0000
commit    0bfe39fadbeb3ea51482d800fab1204e4428c37d (patch)
tree      47be9f182753eed5b0572497f4d2578bb5742997
parent    de5930ff855d91f31a79bd3cf4244bb9d321abde (diff)
download  ComputeLibrary-0bfe39fadbeb3ea51482d800fab1204e4428c37d.tar.gz
COMPMID-3722: Remove OpenCL padding: CLGEMMLowpOffsetContributionKernel
COMPMID-3723: Remove OpenCL padding: CLGEMMLowpOffsetContributionOutputStageKernel

Change-Id: Iac265c2ac4c5749352daa311279a3b8c60ac3b3d
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4228
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl                                      184
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp               66
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp    80
3 files changed, 122 insertions(+), 208 deletions(-)
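
Both kernels previously relied on the X dimension being padded to a multiple of 4 so that vload4/vstore4 could never run out of bounds. The changes below instead pass VEC_SIZE and VEC_SIZE_LEFTOVER at compile time and fold the leftover handling into the first work-item. A minimal C++ sketch of the index math used by the modified kernels (variable and function names here are illustrative, not part of the patch):

    #include <algorithm>
    #include <cstdio>

    // Per-work-item start index used by the modified kernels:
    // x = max(gid * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE, 0)
    int start_index(int gid, int vec_size, int leftover)
    {
        return std::max(gid * vec_size - (vec_size - leftover) % vec_size, 0);
    }

    int main()
    {
        const int dim0     = 35;              // example width of mm_result
        const int vec_size = 16;              // chosen VEC_SIZE
        const int leftover = dim0 % vec_size; // VEC_SIZE_LEFTOVER = 3
        for(int gid = 0; gid < 3; ++gid)
        {
            std::printf("gid=%d -> x=%d\n", gid, start_index(gid, vec_size, leftover));
        }
        // Prints x = 0, 3, 19: work-item 0 stores only the 3 leftover elements,
        // the remaining work-items store full 16-element vectors.
        return 0;
    }

With this scheme the output never needs to be over-allocated: the first work-item writes a partial vector and every later work-item writes a full, in-bounds vector.
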
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index cc0d583e7d..feefaa7197 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -1319,7 +1319,9 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
#endif // defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
-#if defined(K_OFFSET)
+#if defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
+
+#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
/* Helper function used to calculate the offset contribution after matrix multiplication.
*
@@ -1330,8 +1332,10 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
* @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
* @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
* @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
- * @param[in] x get_global_id(0) * 4
+ * @param[in] x max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0)
* @param[in] y get_global_id(1)
* @param[in] z get_global_id(2)
* @param[in] sum_col_ptr (Optional) Pointer to the source tensor. Supported data type: same as @p mm_result_ptr
@@ -1351,7 +1355,7 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
* @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
*/
-inline int4 offset_contribution(
+inline VEC_INT offset_contribution(
int x,
int y,
int z
@@ -1369,8 +1373,8 @@ inline int4 offset_contribution(
#endif // defined(ADD_BIAS)
)
{
- int4 a_offset_s32 = (int4)0;
- int4 b_offset_s32 = (int4)0;
+ VEC_INT a_offset_s32 = (VEC_INT)0;
+ VEC_INT b_offset_s32 = (VEC_INT)0;
int batch_id = z;
#if defined(DEPTH_INPUT3D)
@@ -1383,12 +1387,12 @@ inline int4 offset_contribution(
// Compute the offset contribution due to A_OFFSET
#if defined(SUM_COL_HAS_BATCHES)
- a_offset_s32 = vload4(0, (__global int *)(sum_col_addr + batch_id * sum_col_stride_y));
+ a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)(sum_col_addr + batch_id * sum_col_stride_y));
#else // defined(SUM_COL_HAS_BATCHES)
- a_offset_s32 = vload4(0, (__global int *)sum_col_addr);
+ a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)sum_col_addr);
#endif // defined(SUM_COL_HAS_BATCHES)
- a_offset_s32 *= (int4)A_OFFSET;
+ a_offset_s32 *= (VEC_INT)A_OFFSET;
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
@@ -1397,22 +1401,22 @@ inline int4 offset_contribution(
// Compute the offset contribution due to B_OFFSET
#if defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
- b_offset_s32 = (int4) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)) + (z % (int)DEPTH_INPUT3D) * (int)HEIGHT_INPUT3D);
+ b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)) + (z % (int)DEPTH_INPUT3D) * (int)HEIGHT_INPUT3D);
#else // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
- b_offset_s32 = (int4) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)));
+ b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)));
#endif // defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D)
- b_offset_s32 *= (int4)B_OFFSET;
+ b_offset_s32 *= (VEC_INT)B_OFFSET;
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
// Add bias
__global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int);
- int4 biases_values = vload4(0, (__global int *)bias_addr);
- b_offset_s32 += (int4)biases_values;
+ VEC_INT biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr);
+ b_offset_s32 += (VEC_INT)biases_values;
#endif // defined(ADD_BIAS)
- return (int4)K_OFFSET + a_offset_s32 + b_offset_s32;
+ return (VEC_INT)K_OFFSET + a_offset_s32 + b_offset_s32;
}
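
For reference, a scalar C++ model of the value offset_contribution() produces for one output element; the kernel computes this for VEC_SIZE contiguous x positions at once. K_OFFSET is assumed to be a_offset * b_offset * K as prepared by the host-side code, and the names below are illustrative:

    #include <cstdint>

    // One element of the offset contribution; each term is only present when the
    // corresponding A_OFFSET / B_OFFSET / ADD_BIAS define is set at compile time.
    int32_t offset_contribution_scalar(int32_t k_offset,
                                       int32_t a_offset, int32_t sum_col_x,
                                       int32_t b_offset, int32_t sum_row_y,
                                       int32_t bias_x)
    {
        const int32_t a_term = a_offset * sum_col_x; // column sums of matrix B reduction
        const int32_t b_term = b_offset * sum_row_y; // row sums of matrix A reduction
        return k_offset + a_term + b_term + bias_x;
    }
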
/* OpenCL kernel used to add the offset contribution after matrix multiplication. The computation is performed in-place
@@ -1424,6 +1428,8 @@ inline int4 offset_contribution(
* @note In case the offset contribution due to a_offset is required, a_offset needs to be passed at compile time using -DA_OFFSET (i.e. -DA_OFFSET=1)
* @note In case the offset contribution due to b_offset is required, b_offset needs to be passed at compile time using -DB_OFFSET (i.e. -DB_OFFSET=6)
* @note In case sum_col has batches, -DSUM_COL_HAS_BATCHES must be passed at compile time. Usually if gemmlowp is used to accelerate convolution layer, sum_col will not have batches
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* The final result is:
*
@@ -1472,7 +1478,7 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
#endif // defined(ADD_BIAS))
)
{
- const int x = get_global_id(0) * 4;
+ const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
const int y = get_global_id(1);
const int z = get_global_id(2);
@@ -1552,6 +1558,8 @@ __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result)
* @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
* @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
* These values can be used to implement "rectified linear unit" activation functions
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
* @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -1615,45 +1623,45 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
#endif // defined(PER_CHANNEL_QUANTIZATION)
)
{
- const int x = get_global_id(0) * 4;
+ const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
const int y = get_global_id(1);
const int z = get_global_id(2);
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
// Compute offset contribution
- int4 offset_term_s32 = offset_contribution(
- x, y, z
+ VEC_INT offset_term_s32 = offset_contribution(
+ x, y, z
#if defined(A_OFFSET)
- ,
- sum_col_ptr,
- sum_col_stride_x,
- sum_col_step_x,
- sum_col_stride_y,
- sum_col_step_y,
- sum_col_offset_first_element_in_bytes
+ ,
+ sum_col_ptr,
+ sum_col_stride_x,
+ sum_col_step_x,
+ sum_col_stride_y,
+ sum_col_step_y,
+ sum_col_offset_first_element_in_bytes
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
- ,
- sum_row_ptr,
- sum_row_stride_x,
- sum_row_step_x,
- sum_row_stride_y,
- sum_row_step_y,
- sum_row_offset_first_element_in_bytes
+ ,
+ sum_row_ptr,
+ sum_row_stride_x,
+ sum_row_step_x,
+ sum_row_stride_y,
+ sum_row_step_y,
+ sum_row_offset_first_element_in_bytes
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
- ,
- biases_ptr,
- biases_stride_x,
- biases_step_x,
- biases_offset_first_element_in_bytes
+ ,
+ biases_ptr,
+ biases_stride_x,
+ biases_step_x,
+ biases_offset_first_element_in_bytes
#endif // defined(ADD_BIAS)
- );
+ );
__global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
- int4 in_s32 = vload4(0, (__global int *)mm_result_addr);
+ VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
// Add the offset terms to GEMM's result
in_s32 += offset_term_s32;
@@ -1661,14 +1669,14 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
// -------------- OUTPUT STAGE
// Add the offset terms to GEMM's result
- in_s32 += (int4)RESULT_OFFSET;
+ in_s32 += (VEC_INT)RESULT_OFFSET;
// Multiply by result_mult_int and shift
#if defined(PER_CHANNEL_QUANTIZATION)
__global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
__global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
- int4 result_multipliers_values = vload4(0, (__global int *)result_multipliers_addr);
- int4 result_shifts_values = vload4(0, (__global int *)result_shifts_addr);
+ VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr);
+ VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr);
in_s32 *= result_multipliers_values;
in_s32 >>= result_shifts_values;
@@ -1678,18 +1686,18 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
in_s32 >>= RESULT_SHIFT;
#endif // defined(PER_CHANNEL_QUANTIZATION)
- VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
- res = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));
+ VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
+ res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
#if defined(MIN_BOUND)
- res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
+ res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
- res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
+ res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
#endif // defined(MAX_BOUND)
// Store the result
- vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr);
+ STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
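
The vstore4 at the end of this kernel is replaced by STORE_VECTOR_SELECT, which writes a partial vector only for the first work-item when a leftover exists. A minimal C++ model of that store selection, assuming one byte per output element (names are illustrative, not the macro's implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // When the partial-store condition holds (VEC_SIZE_LEFTOVER != 0 and this is
    // work-item 0), only the leftover elements are written; otherwise a full
    // VEC_SIZE vector is written.
    void store_vector_select(uint8_t *dst, const uint8_t *res,
                             int vec_size, int leftover, bool store_partial)
    {
        const int count = store_partial ? leftover : vec_size;
        std::memcpy(dst, res, static_cast<size_t>(count));
    }
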
/* OpenCL kernel used to add the offset contribution after matrix multiplication and it quantizes down to uint8.
@@ -1726,6 +1734,8 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
* @note The output datatype should be passed at compile time using -DOUTPUT_DATA_TYPE
* @note In case the clamping of the result is required, the min and max bounds can be passed at compile time using -DMIN_BOUND and -DMAX_BOUND.
* These values can be used to implement "rectified linear unit" activation functions
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* @param[in] mm_result_ptr Pointer to the source tensor. Supported data type: S32
* @param[in] mm_result_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -1751,7 +1761,7 @@ __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm
* @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
* @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
- * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8
+ * @param[out] dst_ptr Pointer to the destination tensor Supported data type: QASYMM8/QASYMM8_SIGNED
* @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
@@ -1789,45 +1799,45 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
#endif // defined(PER_CHANNEL_QUANTIZATION)
)
{
- const int x = get_global_id(0) * 4;
+ const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
const int y = get_global_id(1);
const int z = get_global_id(2);
// Compute offset contribution
- int4 offset_term_s32 = offset_contribution(
- x, y, z
+ VEC_INT offset_term_s32 = offset_contribution(
+ x, y, z
#if defined(A_OFFSET)
- ,
- sum_col_ptr,
- sum_col_stride_x,
- sum_col_step_x,
- sum_col_stride_y,
- sum_col_step_y,
- sum_col_offset_first_element_in_bytes
+ ,
+ sum_col_ptr,
+ sum_col_stride_x,
+ sum_col_step_x,
+ sum_col_stride_y,
+ sum_col_step_y,
+ sum_col_offset_first_element_in_bytes
#endif // defined(A_OFFSET)
#if defined(B_OFFSET)
- ,
- sum_row_ptr,
- sum_row_stride_x,
- sum_row_step_x,
- sum_row_stride_y,
- sum_row_step_y,
- sum_row_offset_first_element_in_bytes
+ ,
+ sum_row_ptr,
+ sum_row_stride_x,
+ sum_row_step_x,
+ sum_row_stride_y,
+ sum_row_step_y,
+ sum_row_offset_first_element_in_bytes
#endif // defined(B_OFFSET)
#if defined(ADD_BIAS)
- ,
- biases_ptr,
- biases_stride_x,
- biases_step_x,
- biases_offset_first_element_in_bytes
+ ,
+ biases_ptr,
+ biases_stride_x,
+ biases_step_x,
+ biases_offset_first_element_in_bytes
#endif // defined(ADD_BIAS)
- );
+ );
__global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z;
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z;
- int4 in_s32 = vload4(0, (__global int *)mm_result_addr);
+ VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr);
// Add the offset terms to GEMM's result
in_s32 += offset_term_s32;
@@ -1838,41 +1848,43 @@ __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DEC
#if defined(PER_CHANNEL_QUANTIZATION)
__global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int);
__global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int);
- int4 result_multipliers_values = vload4(0, (__global int *)result_multipliers_addr);
- int4 result_shifts_values = vload4(0, (__global int *)result_shifts_addr);
+ VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr);
+ VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr);
- int4 in_s32_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, 4);
- int4 in_s32_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, 4);
- in_s32 = select(in_s32_shift_lt0, in_s32_shift_gt0, result_shifts_values >= 0);
+ VEC_INT in_s32_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE);
+ VEC_INT in_s32_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE);
+ in_s32 = select(in_s32_shift_lt0, in_s32_shift_gt0, result_shifts_values >= 0);
#else // defined(PER_CHANNEL_QUANTIZATION)
#if RESULT_SHIFT < 0
- in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, 4);
+ in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
#else // RESULT_SHIFT >= 0
- in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, 4);
+ in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE);
#endif // RESULT_SHIFT < 0
#endif // defined(PER_CHANNEL_QUANTIZATION)
// Add the offset terms to GEMM's result
- in_s32 += (int4)RESULT_OFFSET;
+ in_s32 += (VEC_INT)RESULT_OFFSET;
- VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4)
- res = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4));
+ VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)
+ res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE));
#if defined(MIN_BOUND)
- res = max(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MIN_BOUND);
+ res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND);
#endif // defined(MIN_BOUND)
#if defined(MAX_BOUND)
- res = min(res, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, 4))MAX_BOUND);
+ res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND);
#endif // defined(MAX_BOUND)
// Store the result
- vstore4(res, 0, (__global OUTPUT_DATA_TYPE *)dst_addr);
+ STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
#endif // defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) && defined(OUTPUT_DATA_TYPE)
-#endif // defined(K_OFFSET)
+#undef VEC_INT
+
+#endif // defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
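
The per-channel branch in the fixed-point kernel above selects between the "greater than one" and "less than one" multiplier paths depending on the sign of the shift. A scalar C++ sketch of that selection, using simplified stand-ins for the gemmlowp-style fixed-point helpers (these stand-ins are assumptions, not the library's implementations):

    #include <cstdint>
    #include <limits>

    // Saturating rounding doubling high multiply (gemmlowp-style), simplified.
    static int32_t sat_rounding_doubling_high_mul(int32_t a, int32_t b)
    {
        const bool    overflow = (a == b) && (a == std::numeric_limits<int32_t>::min());
        const int64_t ab       = static_cast<int64_t>(a) * static_cast<int64_t>(b);
        const int64_t nudge    = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
        const int32_t result   = static_cast<int32_t>((ab + nudge) / (1ll << 31));
        return overflow ? std::numeric_limits<int32_t>::max() : result;
    }

    // Rounding division by a power of two, simplified.
    static int32_t rounding_divide_by_pow2(int32_t x, int exponent)
    {
        const int32_t mask      = (1 << exponent) - 1;
        const int32_t remainder = x & mask;
        const int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
        return (x >> exponent) + (remainder > threshold ? 1 : 0);
    }

    // Negative shifts take the "greater than one" path (scale up before the
    // fixed-point multiply); non-negative shifts take the "less than one" path
    // (rounding right shift afterwards), mirroring the select() on the shifts.
    int32_t requantize(int32_t acc, int32_t multiplier, int32_t shift)
    {
        if(shift < 0)
        {
            return sat_rounding_doubling_high_mul(acc * (1 << -shift), multiplier);
        }
        return rounding_divide_by_pow2(sat_rounding_doubling_high_mul(acc, multiplier), shift);
    }
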
#if defined(RESULT_OFFSET) && defined(RESULT_MULT_INT) && defined(RESULT_SHIFT)
/** This OpenCL kernel is used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
index 7ab96e5fa9..aa4eea60ca 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.cpp
@@ -32,16 +32,8 @@
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
-#include <cstddef>
-#include <cstdint>
-
-using namespace arm_compute;
-
namespace arm_compute
{
-class Coordinates;
-} // namespace arm_compute
-
namespace
{
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
@@ -100,39 +92,6 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vecto
return Status{};
}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias,
- int32_t a_offset, int32_t b_offset)
-{
- constexpr unsigned int num_elems_processed_per_iteration = 4;
- bool window_changed = false;
-
- // Configure kernel window
- Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));
-
- AccessWindowHorizontal mm_result_access(mm_result, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, mm_result_access);
-
- if(a_offset != 0)
- {
- AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
- }
- if(b_offset != 0)
- {
- AccessWindowStatic vector_sum_row_access(vector_sum_row, 0, 0, vector_sum_row->dimension(0), 0); // NOLINT
- window_changed = window_changed || update_window_and_padding(win, vector_sum_row_access);
- }
-
- if(bias != nullptr)
- {
- AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
- window_changed = window_changed || update_window_and_padding(win, bias_access);
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
} // namespace
CLGEMMLowpOffsetContributionKernel::CLGEMMLowpOffsetContributionKernel()
@@ -159,6 +118,8 @@ void CLGEMMLowpOffsetContributionKernel::configure(const CLCompileContext &compi
bias != nullptr ? bias->info() : nullptr,
a_offset, b_offset)); // NOLINT
+ auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias });
+
_vector_sum_col = vector_sum_col;
_vector_sum_row = vector_sum_row;
_mm_result = mm_result;
@@ -169,8 +130,12 @@ void CLGEMMLowpOffsetContributionKernel::configure(const CLCompileContext &compi
&& mm_result->info()->num_dimensions() > 1
&& mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
+ const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->info()->dimension(0));
+
// Set the arguments to pass at compile time
CLBuildOptions build_opts;
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->info()->dimension(0) % num_elems_processed_per_iteration));
// If a_offset == 0, vector_sum_col can be a nullptr
if(a_offset != 0)
@@ -191,13 +156,8 @@ void CLGEMMLowpOffsetContributionKernel::configure(const CLCompileContext &compi
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(mm_result->info(),
- vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
- bias != nullptr ? bias->info() : nullptr,
- a_offset, b_offset); // NOLINT
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ Window win = calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
// Set config_id for enabling LWS tuning
_config_id = kernel_name + "_";
@@ -206,19 +166,14 @@ void CLGEMMLowpOffsetContributionKernel::configure(const CLCompileContext &compi
_config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
_config_id += "_";
_config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
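
configure() now derives both build options from the first dimension of mm_result: adjust_vec_size() reduces the preferred width of 4 so it does not exceed dim0, and the leftover is the remainder of dim0 divided by the chosen width. A small host-side sketch of the resulting definitions (the helper name below is hypothetical, and the width is taken as a parameter rather than re-implementing adjust_vec_size):

    #include <string>

    // Builds the two new compile-time options for a given vector width and
    // tensor width. For example, make_vec_size_options(4, 35) returns
    // "-DVEC_SIZE=4 -DVEC_SIZE_LEFTOVER=3".
    std::string make_vec_size_options(unsigned int vec_size, unsigned int dim0)
    {
        const unsigned int leftover = dim0 % vec_size;
        return "-DVEC_SIZE=" + std::to_string(vec_size) +
               " -DVEC_SIZE_LEFTOVER=" + std::to_string(leftover);
    }
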
Status CLGEMMLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
int32_t a_offset, int32_t b_offset)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, a_offset, b_offset));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(),
- vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
- bias != nullptr ? bias->clone().get() : nullptr,
- a_offset, b_offset)
- .first); // NOLINT
-
return Status{};
}
@@ -257,3 +212,4 @@ void CLGEMMLowpOffsetContributionKernel::run(const Window &window, cl::CommandQu
}
while(collapsed.slide_window_slice_3D(slice));
}
+} // namespace arm_compute
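
With the AccessWindow logic removed, the window is simply calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration)), which rounds the X range up to a multiple of the step. A rough C++ model of the resulting work-item count along X (ceil_to_multiple below is a local stand-in, not the library function):

    #include <cstdio>

    // Round value up to the next multiple of 'multiple'.
    unsigned int ceil_to_multiple(unsigned int value, unsigned int multiple)
    {
        return ((value + multiple - 1) / multiple) * multiple;
    }

    int main()
    {
        const unsigned int dim0 = 35, vec_size = 4;
        // 35 rounded up to 36, divided by 4 -> 9 work-items along X; no padding
        // is requested, the leftover is handled inside the kernel instead.
        std::printf("work-items along X: %u\n", ceil_to_multiple(dim0, vec_size) / vec_size);
        return 0;
    }
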
diff --git a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
index 85285d6704..afa7bdbfdf 100644
--- a/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.cpp
@@ -34,9 +34,6 @@
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
-#include <cstddef>
-#include <cstdint>
-
namespace arm_compute
{
namespace
@@ -119,52 +116,6 @@ Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vecto
return Status{};
}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *vector_sum_col, ITensorInfo *vector_sum_row, ITensorInfo *bias, ITensorInfo *output,
- int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, ITensorInfo *output_multipliers, ITensorInfo *output_shifts)
-{
- constexpr unsigned int num_elems_processed_per_iteration = 4;
- bool window_changed = false;
-
- // Auto initialize the output
- auto_init_if_empty(*output, mm_result->clone()->set_data_type(output_stage.output_data_type));
-
- // Configure kernel window
- Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));
-
- AccessWindowHorizontal mm_result_access(mm_result, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, mm_result_access);
-
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, output_access);
-
- if(a_offset != 0)
- {
- AccessWindowHorizontal vector_sum_col_access(vector_sum_col, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, vector_sum_col_access);
- }
- if(b_offset != 0)
- {
- AccessWindowStatic vector_sum_row_access(vector_sum_row, 0, 0, vector_sum_row->dimension(0), 0); // NOLINT
- window_changed = window_changed || update_window_and_padding(win, vector_sum_row_access);
- }
-
- if(bias != nullptr)
- {
- AccessWindowStatic bias_access(bias, 0, 0, ceil_to_multiple(bias->dimension(0), num_elems_processed_per_iteration), bias->tensor_shape()[1]);
- window_changed = window_changed || update_window_and_padding(win, bias_access);
- }
-
- if(output_multipliers->dimension(0) > 1)
- {
- AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_processed_per_iteration);
- window_changed = window_changed || update_window_and_padding(win, output_multipliers_access, output_shifts_access);
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
} // namespace
CLGEMMLowpOffsetContributionOutputStageKernel::CLGEMMLowpOffsetContributionOutputStageKernel()
@@ -201,6 +152,8 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileCon
a_offset, b_offset, output_stage,
output_multipliers->info(), output_shifts->info())); // NOLINT
+ auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias, output, output_multipliers, output_shifts });
+
const int min = output_stage.gemmlowp_min_bound;
const int max = output_stage.gemmlowp_max_bound;
@@ -218,8 +171,15 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileCon
&& mm_result->info()->num_dimensions() > 1
&& mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
+ // Auto initialize the output
+ auto_init_if_empty(*output->info(), mm_result->info()->clone()->set_data_type(output_stage.output_data_type));
+
+ const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->info()->dimension(0));
+
// Set the arguments to pass at compile time
CLBuildOptions build_opts;
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->info()->dimension(0) % num_elems_processed_per_iteration));
// If a_offset == 0, vector_sum_col can be a nullptr
if(a_offset != 0)
@@ -252,15 +212,8 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileCon
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window(mm_result->info(),
- vector_sum_col != nullptr ? vector_sum_col->info() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->info() : nullptr,
- bias != nullptr ? bias->info() : nullptr,
- output->info(),
- a_offset, b_offset, output_stage,
- output_multipliers->info(), output_shifts->info()); // NOLINT
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ Window win = calculate_max_window(*mm_result->info(), Steps(num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
// Set config_id for enabling LWS tuning
_config_id = kernel_name + "_";
@@ -269,6 +222,8 @@ void CLGEMMLowpOffsetContributionOutputStageKernel::configure(const CLCompileCon
_config_id += support::cpp11::to_string(mm_result->info()->dimension(1));
_config_id += "_";
_config_id += support::cpp11::to_string(mm_result->info()->dimension(2));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
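
Both kernels now bracket configure() with get_padding_info() and has_padding_changed() to assert that setting up the window did not request any extra padding. A conceptual C++ model of that guard (types and names here are illustrative, not the library's API, which operates on ITensorInfo objects):

    #include <cstddef>
    #include <map>
    #include <utility>
    #include <vector>

    // Each entry: an identifier for a tensor and its total padding (in elements).
    using PaddingList = std::vector<std::pair<const void *, std::size_t>>;

    // Snapshot taken at the top of configure(), before the window is set up.
    std::map<const void *, std::size_t> snapshot_padding(const PaddingList &tensors)
    {
        return std::map<const void *, std::size_t>(tensors.begin(), tensors.end());
    }

    // Checked at the bottom of configure(): returns true if any tensor's padding
    // grew while the kernel was being configured.
    bool padding_changed(const std::map<const void *, std::size_t> &before, const PaddingList &after)
    {
        for(const auto &t : after)
        {
            const auto it = before.find(t.first);
            if(it != before.end() && it->second != t.second)
            {
                return true;
            }
        }
        return false;
    }
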
Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
@@ -276,15 +231,6 @@ Status CLGEMMLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo
const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, output, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(mm_result->clone().get(),
- vector_sum_col != nullptr ? vector_sum_col->clone().get() : nullptr,
- vector_sum_row != nullptr ? vector_sum_row->clone().get() : nullptr,
- bias != nullptr ? bias->clone().get() : nullptr,
- output->clone().get(),
- a_offset, b_offset, output_stage,
- output_multipliers->clone().get(), output_shifts->clone().get())
- .first); // NOLINT
-
return Status{};
}