author     Michele Di Giorgio <michele.digiorgio@arm.com>   2020-10-19 15:31:45 +0100
committer  Michele Di Giorgio <michele.digiorgio@arm.com>   2020-10-21 16:17:01 +0000
commit     aae3410bfd58b9aeed4964856b84d7d555b91c3e (patch)
tree       bddce57867636df26d39e6c928799e0a5177099c
parent     861c2ec6502995d5ada21b06addd92cfeaa7a354 (diff)
download   ComputeLibrary-aae3410bfd58b9aeed4964856b84d7d555b91c3e.tar.gz
COMPMID-3729: Remove OpenCL padding: CLGEMMLowpReductionKernel
Added utility functions developed by Giorgio for checking that padding
remains unchanged after configure.

Change-Id: I6862e74baf9b8792991e3f25e176c672c0a46836
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4208
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  src/core/CL/cl_kernels/gemmlowp.cl                  50
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp   42
2 files changed, 43 insertions(+), 49 deletions(-)
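
The host-side half of this change is the padding check visible in the .cpp diff below: configure() records the padding of every tensor it touches with get_padding_info() and asserts afterwards with has_padding_changed() that building the kernel did not grow it. The following is a minimal, self-contained C++ model of that check, not the library code; the struct and tensor names are illustrative only.

// Minimal stand-alone model of the padding check used in the patch below.
// The real get_padding_info()/has_padding_changed() helpers live in the
// library; this sketch only mirrors their intent.
#include <cassert>
#include <initializer_list>
#include <map>

struct Padding
{
    unsigned int left = 0, right = 0, top = 0, bottom = 0;
    bool operator!=(const Padding &other) const
    {
        return left != other.left || right != other.right || top != other.top || bottom != other.bottom;
    }
};

struct TensorInfo
{
    Padding padding{};
};

// Snapshot the padding of every tensor the kernel touches.
std::map<const TensorInfo *, Padding> get_padding_info(std::initializer_list<const TensorInfo *> tensors)
{
    std::map<const TensorInfo *, Padding> snapshot;
    for(const TensorInfo *t : tensors)
    {
        snapshot[t] = t->padding;
    }
    return snapshot;
}

// True if any tensor's padding differs from the snapshot taken before configure().
bool has_padding_changed(const std::map<const TensorInfo *, Padding> &snapshot)
{
    for(const auto &entry : snapshot)
    {
        if(entry.first->padding != entry.second)
        {
            return true;
        }
    }
    return false;
}

int main()
{
    TensorInfo mtx_b{}, vector_sum_col{};
    auto padding_info = get_padding_info({ &mtx_b, &vector_sum_col });
    // configure() would create the kernel and window here; with the
    // leftover-aware kernel it must leave both paddings untouched.
    assert(!has_padding_changed(padding_info));
    return 0;
}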
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 9bdd5a2d0e..cc0d583e7d 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -1238,7 +1238,7 @@ __kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#endif // defined(COLS_A)
-#if defined(COLS_B) && defined(ROWS_B)
+#if defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
/** OpenCL kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
* It is also possible to multiply each reduced column by a scalar value, if SCALAR is passed at compile time.
*
@@ -1249,6 +1249,8 @@ __kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src),
* @note The input data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=uchar)
* @note The data type for the accumulation must be passed at compile time using -DACC_DATA_TYPE (i.e. -DACC_DATA_TYPE=uint)
* @note In case of scaling the scalar value must be passed at compile time using -DSCALAR (i.e. -DSCALAR=3)
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
*
* @param[in] src_ptr Pointer to the source tensor. Supported data type: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -1269,29 +1271,30 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
IMAGE_DECLARATION(dst))
{
// Compute source and destination addresses
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
+ const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
+ const uint y = get_global_id(1);
- VEC_DATA_TYPE(ACC_DATA_TYPE, 16)
- sum_col_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 16))0;
+ __global const DATA_TYPE *matrix_b = (__global const DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + y * src_step_y + y * src_stride_z);
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(int) + y * dst_stride_y;
- __global const DATA_TYPE *matrix_b = (__global const DATA_TYPE *)(src.ptr + get_global_id(1) * src_stride_z);
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)
+ sum_col_32_0 = (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))0;
int i = 0;
// This for loop performs 4 accumulations
for(; i <= ((int)ROWS_B - 4); i += 4)
{
- const VEC_DATA_TYPE(DATA_TYPE, 16)
- b0 = vload16(0, matrix_b + 0 * src_stride_y);
- const VEC_DATA_TYPE(DATA_TYPE, 16)
- b1 = vload16(0, matrix_b + 1 * src_stride_y);
- const VEC_DATA_TYPE(DATA_TYPE, 16)
- b2 = vload16(0, matrix_b + 2 * src_stride_y);
- const VEC_DATA_TYPE(DATA_TYPE, 16)
- b3 = vload16(0, matrix_b + 3 * src_stride_y);
-
- sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, 16)) + CONVERT(b1, VEC_DATA_TYPE(ACC_DATA_TYPE, 16)) + CONVERT(b2, VEC_DATA_TYPE(ACC_DATA_TYPE, 16)) + CONVERT(b3, VEC_DATA_TYPE(ACC_DATA_TYPE,
- 16));
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ b0 = VLOAD(VEC_SIZE)(0, matrix_b + 0 * src_stride_y);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ b1 = VLOAD(VEC_SIZE)(0, matrix_b + 1 * src_stride_y);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ b2 = VLOAD(VEC_SIZE)(0, matrix_b + 2 * src_stride_y);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ b3 = VLOAD(VEC_SIZE)(0, matrix_b + 3 * src_stride_y);
+
+ sum_col_32_0 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b1, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b2, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b3,
+ VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
matrix_b += 4 * src_stride_y;
}
@@ -1299,21 +1302,20 @@ __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src),
// This for loop perfoms the leftover accumulations
for(; i < (int)ROWS_B; ++i)
{
- const VEC_DATA_TYPE(DATA_TYPE, 16)
- b0 = vload16(0, matrix_b);
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ b0 = VLOAD(VEC_SIZE)(0, matrix_b);
- sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, 16));
+ sum_col_32_0 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE));
matrix_b += src_stride_y;
}
#if defined(SCALAR)
- sum_col_32 *= (VEC_DATA_TYPE(ACC_DATA_TYPE, 16))SCALAR;
+ sum_col_32_0 *= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))SCALAR;
#endif // defined(SCALAR)
- VSTORE(16)
- (convert_int16(sum_col_32), 0, (__global int *)dst.ptr);
+ STORE_VECTOR_SELECT(sum_col_32_, int, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
-#endif // defined(COLS_B) && defined(ROWS_B)
+#endif // defined(COLS_B) && defined(ROWS_B) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER)
#endif // defined(DATA_TYPE) && defined(ACC_DATA_TYPE)
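
The kernel change above drops the fixed vload16/vstore16 pattern: when there is a leftover, the first work-item handles only VEC_SIZE_LEFTOVER elements, while every other work-item is shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE so its full-width vector stays inside the tensor. That is what makes destination padding unnecessary. A small host-side C++ model of the index arithmetic, with an illustrative dim0 = 27:

// Host-side model of the x_offs / STORE_VECTOR_SELECT arithmetic in the
// rewritten gemmlowp_matrix_b_reduction kernel. Values are illustrative.
#include <algorithm>
#include <cstdio>

int main()
{
    const int dim0     = 27;              // first dimension of matrix B (illustrative)
    const int vec_size = 16;              // VEC_SIZE
    const int leftover = dim0 % vec_size; // VEC_SIZE_LEFTOVER
    const int n_items  = (dim0 + vec_size - 1) / vec_size; // work-items along X

    for(int gid = 0; gid < n_items; ++gid)
    {
        // Same expression as the kernel: shift everything but work-item 0 back.
        const int x_offs  = std::max(gid * vec_size - (vec_size - leftover) % vec_size, 0);
        // STORE_VECTOR_SELECT: work-item 0 stores only the leftover elements.
        const int n_store = (leftover != 0 && gid == 0) ? leftover : vec_size;
        std::printf("gid=%d writes columns [%d, %d)\n", gid, x_offs, x_offs + n_store);
    }
    // Prints [0, 11) and [11, 27): the row of column sums is fully covered
    // and nothing is written past dim0, so no destination padding is needed.
    return 0;
}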
diff --git a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
index 826b265dbf..339049ff9a 100644
--- a/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpReductionKernel.cpp
@@ -60,27 +60,6 @@ Status validate_arguments_matrix_b_reduction(const ITensorInfo *input, const ITe
}
return Status{};
}
-
-std::pair<Status, Window> validate_and_configure_window_matrix_b_reduction(ITensorInfo *input, ITensorInfo *output)
-{
- constexpr unsigned int num_elems_processed_per_iteration = 16;
-
- // Output auto initialization if not yet initialized
- auto_init_if_empty(*output, TensorShape(input->dimension(0)), 1, DataType::S32);
-
- // Configure kernel window
- Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
-
- AccessWindowStatic input_access(input, 0, 0, ceil_to_multiple(input->dimension(0), num_elems_processed_per_iteration), input->dimension(1));
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
- bool window_changed = update_window_and_padding(win, input_access, output_access);
-
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_pair(err, win);
-}
} // namespace
ICLGEMMLowpReductionKernel::ICLGEMMLowpReductionKernel()
@@ -102,6 +81,8 @@ void CLGEMMLowpMatrixAReductionKernel::configure(const CLCompileContext &compile
// Output auto initialization if not yet initialized
auto_init_if_empty(*vector_sum_row->info(), TensorShape(mtx_a->info()->dimension(1)), 1, DataType::S32);
+ auto padding_info = get_padding_info({ mtx_a, vector_sum_row });
+
_input = mtx_a;
_output = vector_sum_row;
@@ -131,6 +112,8 @@ void CLGEMMLowpMatrixAReductionKernel::configure(const CLCompileContext &compile
_config_id += support::cpp11::to_string(_input->info()->dimension(1));
_config_id += "_";
_config_id += support::cpp11::to_string(_input->info()->dimension(2));
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMLowpMatrixAReductionKernel::validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info)
@@ -178,8 +161,17 @@ void CLGEMMLowpMatrixBReductionKernel::configure(const CLCompileContext &compile
_input = mtx_b;
_output = vector_sum_col;
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*_output->info(), TensorShape(mtx_b->info()->dimension(0)), 1, DataType::S32);
+
+ auto padding_info = get_padding_info({ mtx_b, vector_sum_col });
+
+ const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16, mtx_b->info()->dimension(0));
+
// Set the arguments to pass at compile time
CLBuildOptions build_opts;
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mtx_b->info()->dimension(0) % num_elems_processed_per_iteration));
build_opts.add_option("-DCOLS_B=" + support::cpp11::to_string(mtx_b->info()->dimension(0)));
build_opts.add_option("-DROWS_B=" + support::cpp11::to_string(mtx_b->info()->dimension(1)));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(mtx_b->info()->data_type()));
@@ -190,16 +182,16 @@ void CLGEMMLowpMatrixBReductionKernel::configure(const CLCompileContext &compile
_kernel = create_kernel(compile_context, "gemmlowp_matrix_b_reduction", build_opts.options());
// Configure kernel window
- auto win_config = validate_and_configure_window_matrix_b_reduction(_input->info(), _output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ Window win = calculate_max_window(*_output->info(), Steps(num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLGEMMLowpMatrixBReductionKernel::validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info)
{
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_matrix_b_reduction(mtx_b, vector_sum_col));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_matrix_b_reduction(mtx_b->clone().get(), vector_sum_col->clone().get()).first);
return Status{};
}
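
On the host side, configure() now derives both build options from the tensor shape and sizes the window as a plain ceil-division of the output width, with no access windows or padding updates. A simplified sketch of that sizing; std::min is only a stand-in for adjust_vec_size(16, dim0), which in the library may choose a smaller power-of-two width, and dim0 = 27 is illustrative:

// Simplified sketch of the host-side sizing in
// CLGEMMLowpMatrixBReductionKernel::configure().
#include <algorithm>
#include <cstdio>

int main()
{
    const unsigned int dim0     = 27;                               // mtx_b->info()->dimension(0), illustrative
    const unsigned int vec_size = std::min(16u, dim0);              // stand-in for adjust_vec_size(16, dim0)
    const unsigned int leftover = dim0 % vec_size;                  // -DVEC_SIZE_LEFTOVER
    const unsigned int n_steps  = (dim0 + vec_size - 1) / vec_size; // window iterations along X

    std::printf("-DVEC_SIZE=%u -DVEC_SIZE_LEFTOVER=%u -> %u work-items along X\n",
                vec_size, leftover, n_steps);
    return 0;
}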