From 932402276935a5fdf3a15b0c10e5310c7a0ae393 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Wed, 23 Dec 2020 11:55:29 +0000
Subject: Remove OpenCL padding: CLPadLayerKernel

Resolves: COMPMID-3912

Change-Id: I1f8bd3bfec263ebfd70bc96f9183ccdc3089db13
Signed-off-by: Giorgio Arena
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4754
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Reviewed-by: Manuel Bottini
Reviewed-by: Georgios Pinitas
Reviewed-by: Gian Marco Iodice
---
 src/core/CL/cl_kernels/helpers.h         |   6 ++
 src/core/CL/cl_kernels/pad_layer.cl      | 118 +++++++++++++++++--------------
 src/core/CL/kernels/CLPadLayerKernel.cpp | 114 +++++++++++------------------
 src/core/CL/kernels/CLPadLayerKernel.h   |   2 -
 4 files changed, 113 insertions(+), 127 deletions(-)

(limited to 'src/core/CL')

diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index 372ccd91fb..df3b4937b2 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -110,18 +110,22 @@
  * @{
  */
 #define ROT1_0(x) ((x))
+#define ROT1_1(x) ((x))
 
 #define ROT2_0(x) ((x))
 #define ROT2_1(x) ((x).s10)
+#define ROT2_2(x) ((x))
 
 #define ROT3_0(x) ((x))
 #define ROT3_1(x) ((x).s201)
 #define ROT3_2(x) ((x).s120)
+#define ROT3_3(x) ((x))
 
 #define ROT4_0(x) ((x))
 #define ROT4_1(x) ((x).s3012)
 #define ROT4_2(x) ((x).s2301)
 #define ROT4_3(x) ((x).s1230)
+#define ROT4_4(x) ((x))
 
 #define ROT8_0(x) ((x))
 #define ROT8_1(x) ((x).s70123456)
@@ -131,6 +135,7 @@
 #define ROT8_5(x) ((x).s34567012)
 #define ROT8_6(x) ((x).s23456701)
 #define ROT8_7(x) ((x).s12345670)
+#define ROT8_8(x) ((x))
 
 #define ROT16_0(x) ((x))
 #define ROT16_1(x) ((x).sF0123456789ABCDE)
@@ -148,6 +153,7 @@
 #define ROT16_13(x) ((x).s3456789ABCDEF012)
 #define ROT16_14(x) ((x).s23456789ABCDEF01)
 #define ROT16_15(x) ((x).s123456789ABCDEF0)
+#define ROT16_16(x) ((x))
 
 /** @} */ // end of group ROTs_n
 
 /** Circular-right-shift (rotate-right) the given vector by the given amount.
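The new ROTn_n cases above make a rotation by the full vector width a legal, identity operation, which the padding kernel below relies on when a computed rotation amount happens to equal VEC_SIZE. A minimal host-side C++ sketch of the same circular right rotation (illustrative only, not part of the patch):

    #include <array>
    #include <cstdio>

    // Circular right rotation by n of a fixed-size "vector", mirroring the ROTn_m helpers.
    template <typename T, std::size_t N>
    std::array<T, N> rotate_right(const std::array<T, N> &v, std::size_t n)
    {
        std::array<T, N> out{};
        for(std::size_t i = 0; i < N; ++i)
        {
            out[(i + n) % N] = v[i];
        }
        return out;
    }

    int main()
    {
        const std::array<int, 4> v{ 0, 1, 2, 3 };
        const auto r1 = rotate_right(v, 1); // {3, 0, 1, 2}, like ROT4_1
        const auto r4 = rotate_right(v, 4); // {0, 1, 2, 3}, identity, like the new ROT4_4
        std::printf("%d %d %d %d\n", r1[0], r1[1], r1[2], r1[3]);
        std::printf("%d %d %d %d\n", r4[0], r4[1], r4[2], r4[3]);
        return 0;
    }

Rotating a 4-lane vector by 4 lanes returns it unchanged, which is exactly what the added ROT4_4 case encodes.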
diff --git a/src/core/CL/cl_kernels/pad_layer.cl b/src/core/CL/cl_kernels/pad_layer.cl
index fe71b5d119..903e924a2f 100644
--- a/src/core/CL/cl_kernels/pad_layer.cl
+++ b/src/core/CL/cl_kernels/pad_layer.cl
@@ -23,14 +23,15 @@
  */
 #include "helpers.h"
 
-#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(PAD_X_BEFORE) && defined(SRC_WIDTH)
+#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(PAD_X_BEFORE) && defined(SRC_WIDTH) && defined(PAD_X_BEFORE_REMAINDER) && defined(VEC_SIZE_LEFTOVER_WRITE)
 
 #define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
 #define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
 #define VEC_SELECT SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
 #define OFFSETS VEC_OFFS(SELECT_DATA_TYPE(DATA_TYPE), VEC_SIZE)
+#define SCALAR_COND(x) CONVERT((VEC_SELECT)x == (VEC_SELECT)1, VEC_SELECT)
 
-#if defined(CONST_VAL)
+#if defined(CONST_VAL) && defined(VEC_SIZE_LEFTOVER_READ)
 /** Perform a pad operation when PaddingMode is CONSTANT
  *
  * @note Data type can be passed using the -DDATA_TYPE compile flag, e.g. -DDATA_TYPE=float
@@ -39,7 +40,9 @@
  * @note Pad to add to the left must be passed using the -DPAD_X_BEFORE compile flag, e.g. -DPAD_X_BEFORE=5
  * @note Input tensor's width must be passed using the -DSRC_WIDTH compile flag, e.g. -DSRC_WIDTH=224
  * @note In case pad left is more than the vector size, the number of threads to skip along the X axis must be passed using the
- *       -DNUM_THREADS_TO_SKIP_X compile flag, e.g. -DNUM_THREADS_TO_SKIP_X=1. This is defined as (PAD_X_BEFORE / VEC_SIZE)
+ *       -DTHREADS_TO_SKIP_BEFORE compile flag, e.g. -DTHREADS_TO_SKIP_BEFORE=1. This is defined as (PAD_X_BEFORE / VEC_SIZE)
+ * @note In case pad left is more than the vector size, the thread from which to skip along the X axis for pad right must be passed using the
+ *       -DTHREADS_TO_SKIP_AFTER compile flag, e.g. -DTHREADS_TO_SKIP_AFTER=1. This is defined as ((SRC_WIDTH + PAD_X_BEFORE) / VEC_SIZE)
  * @note If pad also needs to be added to the top of the tensor, the following compile flags must be passed at compile time:
  *       -# -DPAD_Y_BEFORE: Pad to add to the top of the input tensor (e.g. -DPAD_Y_BEFORE=3)
  *       -# -DSRC_HEIGHT: Input tensor's height (e.g. -DSRC_HEIGHT=127)
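The two thread-skip constants described in the comment above are easiest to see with numbers. A small C++ sketch of the same arithmetic, using assumed example sizes (the real values are derived in CLPadLayerKernel::configure() further down):

    #include <cstdio>

    // Host-side model of the compile-time constants documented above (example values only).
    int main()
    {
        const unsigned int vec_size     = 2;
        const unsigned int src_width    = 6;
        const unsigned int pad_x_before = 5;

        const unsigned int threads_to_skip_before = pad_x_before / vec_size;               // whole threads that write only left padding
        const unsigned int threads_to_skip_after  = (src_width + pad_x_before) / vec_size; // last thread along X that still reads input values
        const unsigned int pad_x_before_remainder = pad_x_before % vec_size;               // misalignment of the first input value within a vector

        std::printf("skip before: %u, skip after: %u, remainder: %u\n",
                    threads_to_skip_before, threads_to_skip_after, pad_x_before_remainder);
        return 0;
    }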
@@ -76,67 +79,77 @@ __kernel void pad_layer_constant(TENSOR3D_DECLARATION(src),
 #endif // defined(PAD_W_BEFORE)
 )
 {
-    const int x = get_global_id(0);
-    const int y = get_global_id(1);
-    const int z = get_global_id(2);
+    Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
 
-    uint cond = 0;
+    int x = get_global_id(0);
+    int y = get_global_id(1);
+    int z = get_global_id(2);
 
-#if defined(PAD_W_BEFORE)
-    cond |= batch < PAD_W_BEFORE || batch >= (SRC_BATCH + PAD_W_BEFORE);
-#endif // defined(PAD_W_BEFORE)
+    // If true, write only padding values; no reads performed
+    uint cond = 0;
+#if defined(THREADS_TO_SKIP_BEFORE)
+    cond |= x < THREADS_TO_SKIP_BEFORE || x > THREADS_TO_SKIP_AFTER;
+#endif // defined(THREADS_TO_SKIP_BEFORE)
+#if defined(PAD_Y_BEFORE)
+    cond |= y < PAD_Y_BEFORE || y >= (SRC_HEIGHT + PAD_Y_BEFORE);
+#endif // defined(PAD_Y_BEFORE)
 #if defined(PAD_Z_BEFORE)
     cond |= z < PAD_Z_BEFORE || z >= (SRC_DEPTH + PAD_Z_BEFORE);
 #endif // defined(PAD_Z_BEFORE)
+#if defined(PAD_W_BEFORE)
+    cond |= batch < PAD_W_BEFORE || batch >= (SRC_BATCH + PAD_W_BEFORE);
+#endif // defined(PAD_W_BEFORE)
 
     if(cond)
     {
-        Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-        VSTORE(VEC_SIZE)
-        ((VEC_TYPE)CONST_VAL, 0, (__global DATA_TYPE *)dst.ptr);
+        VEC_TYPE const_vals0 = (VEC_TYPE)CONST_VAL;
+        STORE_VECTOR_SELECT(const_vals, DATA_TYPE, dst.ptr, VEC_SIZE, VEC_SIZE_LEFTOVER_WRITE, get_global_id(0) == (get_global_size(0) - 1));
    }
    else
    {
-        Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
-        Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
-#if defined(NUM_THREADS_TO_SKIP_X)
-        /* In case the pad left is greater than the vector size, and we are past the threads operating solely on pad values,
-         * the input pointer must be brought back along the X axis to start from the first non-pad values.
-         *
-         * E.g. with VEC_SIZE=2, PAD_X_BEFORE=5, CONST_VAL=0 and 1D input |1 2 3 4 5 6|:
-         *  -# The first thread will compute the output values |0 0| since it detects (x_outs == (0, 1)) < PAD_X_BEFORE
-         *  -# The second thread will compute the output values |0 0| since it detects (x_outs == (2, 3)) < PAD_X_BEFORE
-         *  -# The third thread should compute |0 1|, however the input pointer is now ahead of ((x * VEC_SIZE) == 4) values, reading |4 5|
-         *  -# To detect this, we use ((PAD_X_BEFORE / VEC_SIZE) == NUM_THREADS_TO_SKIP_X == 2) and check that it is >= to the current x
-         *  -# So, we bring the pointer back of NUM_THREADS_TO_SKIP_X threads, which means multiplying this constant by the input's step along the X axis
-         *  -# Now that the pointer is back of ((NUM_THREADS_TO_SKIP_X * src_step_x) == 4) values, it will read the desired values |0 1|
-         */
-        src.ptr -= select(0u, NUM_THREADS_TO_SKIP_X * src_step_x, x >= NUM_THREADS_TO_SKIP_X);
-#endif // defined(NUM_THREADS_TO_SKIP_X)
+        // Calculate input's coordinates based on output's
+        int w = 0;
+#if defined(THREADS_TO_SKIP_BEFORE)
+        x -= THREADS_TO_SKIP_BEFORE;
+#endif // defined(THREADS_TO_SKIP_BEFORE)
+#if defined(PAD_Y_BEFORE)
+        y -= PAD_Y_BEFORE;
+#endif // defined(PAD_Y_BEFORE)
 #if defined(PAD_Z_BEFORE)
-        src.ptr -= PAD_Z_BEFORE * src_step_z;
+        z -= PAD_Z_BEFORE;
 #endif // defined(PAD_Z_BEFORE)
 #if defined(PAD_W_BEFORE)
-        src.ptr -= PAD_W_BEFORE * SRC_DEPTH * src_step_z;
+        w -= PAD_W_BEFORE * SRC_DEPTH;
 #endif // defined(PAD_W_BEFORE)
+        x *= VEC_SIZE;
+        x -= PAD_X_BEFORE_REMAINDER;
 
-        VEC_TYPE src_vals = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src.ptr);
+        // Check for out of bound reads and clamp X coordinate
+        uint cond_left  = x < 0;
+        uint cond_right = (x + VEC_SIZE) > SRC_WIDTH;
+        x               = clamp(x, 0, (SRC_WIDTH - VEC_SIZE));
 
-        VEC_INT xs_out = (VEC_INT)(x * VEC_SIZE) + CONVERT(OFFSETS, VEC_INT);
-        VEC_INT cond   = xs_out < (VEC_INT)PAD_X_BEFORE || xs_out >= (VEC_INT)(SRC_WIDTH + PAD_X_BEFORE);
-#if defined(PAD_Y_BEFORE)
-        cond |= (VEC_INT)y < (VEC_INT)PAD_Y_BEFORE || (VEC_INT)y >= (VEC_INT)(SRC_HEIGHT + PAD_Y_BEFORE);
-#endif // defined(PAD_Y_BEFORE)
-        VSTORE(VEC_SIZE)
-        (select(src_vals, (VEC_TYPE)CONST_VAL, CONVERT(cond, VEC_SELECT)), 0, (__global DATA_TYPE *)dst.ptr);
+        // Calculate input's address
+        __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * src_stride_x + y * src_stride_y + z * src_stride_z + w * (int)src_stride_z;
+
+        // Read values and rotate them properly if they would have been across paddings
+        VEC_TYPE src_vals0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr);
+        src_vals0          = select(src_vals0, ROTATE(src_vals0, VEC_SIZE, PAD_X_BEFORE_REMAINDER), SCALAR_COND(cond_left));
+        src_vals0          = select(src_vals0, ROTATE(src_vals0, VEC_SIZE, VEC_SIZE_LEFTOVER_READ), SCALAR_COND(cond_right));
+
+        // Check what values would be padding and replace them with the constant value
+        VEC_INT xs_out = (VEC_INT)(get_global_id(0) * VEC_SIZE) + VEC_OFFS(int, VEC_SIZE);
+        VEC_INT conds  = xs_out < (VEC_INT)PAD_X_BEFORE || xs_out >= (VEC_INT)(SRC_WIDTH + PAD_X_BEFORE);
+        src_vals0      = select(src_vals0, (VEC_TYPE)CONST_VAL, CONVERT(conds, VEC_SELECT));
+
+        // Store values in bounds
+        STORE_VECTOR_SELECT(src_vals, DATA_TYPE, dst.ptr, VEC_SIZE, VEC_SIZE_LEFTOVER_WRITE, get_global_id(0) == (get_global_size(0) - 1));
     }
 }
-#endif // defined(CONST_VAL)
+#endif // defined(CONST_VAL) && defined(VEC_SIZE_LEFTOVER_READ)
 
-#if defined(PAD_X_BEFORE_REMAINDER) && defined(PAD_X_AFTER_REMAINDER) && defined(PAD_X_BEFORE_REMAINDER_REFL) && defined(PAD_X_AFTER_REMAINDER_REFL) && defined(AFTER_PAD_FACT_X)
+#if defined(IS_REFLECT) && defined(PAD_X_AFTER_REMAINDER) && defined(PAD_X_BEFORE_REMAINDER_REFL) && defined(PAD_X_AFTER_REMAINDER_REFL) && defined(AFTER_PAD_FACT_X)
 
-#define SCALAR_COND(x) (VEC_SELECT) x == (VEC_SELECT)1
 #define ROTATE_REVERSE(x, n) ROTATE(REVERSE(x, VEC_SIZE), VEC_SIZE, n)
 #define SYMM_REFL_LEFT(x, n0, n1) select(ROTATE_REVERSE(x, n1), ROTATE(x, VEC_SIZE, n0), OFFSETS >= (VEC_SELECT)n0)
 #define SYMM_REFL_RIGHT(x, n0, n1) select(ROTATE(x, VEC_SIZE, n0), ROTATE_REVERSE(x, n1), OFFSETS >= (VEC_SELECT)n0)
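The rewritten constant-pad kernel never reads outside the input: it clamps the X coordinate into the valid range and then rotates the loaded vector so each lane still ends up with the value it would have read without the clamp. Below is a 1D host-side model of that idea, assuming PAD_X_BEFORE < VEC_SIZE (the case with no whole threads to skip) and VEC_SIZE <= SRC_WIDTH (the host code guarantees this via adjust_vec_size). It is an illustrative sketch, not the kernel itself:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // 1D model of the clamp-and-rotate read used by pad_layer_constant above.
    int main()
    {
        constexpr int VEC_SIZE = 4, SRC_WIDTH = 7, PAD_X_BEFORE = 2, PAD_X_AFTER = 2, CONST_VAL = 0;
        constexpr int DST_WIDTH              = PAD_X_BEFORE + SRC_WIDTH + PAD_X_AFTER;
        constexpr int PAD_RIGHT_START        = SRC_WIDTH + PAD_X_BEFORE;
        constexpr int PAD_X_BEFORE_REMAINDER = PAD_X_BEFORE % VEC_SIZE;
        // Same value as vec_size - (ceil_to_multiple(pad_right_start, vec_size) - pad_right_start)
        constexpr int VEC_SIZE_LEFTOVER_READ = (PAD_RIGHT_START % VEC_SIZE == 0) ? VEC_SIZE : PAD_RIGHT_START % VEC_SIZE;

        const std::vector<int> src = { 10, 11, 12, 13, 14, 15, 16 };
        std::vector<int>       dst(DST_WIDTH, -1);

        for(int t = 0; t < (DST_WIDTH + VEC_SIZE - 1) / VEC_SIZE; ++t)
        {
            int        x          = t * VEC_SIZE - PAD_X_BEFORE_REMAINDER; // input-space start of this thread's vector
            const bool cond_left  = x < 0;
            const bool cond_right = (x + VEC_SIZE) > SRC_WIDTH;
            x                     = std::clamp(x, 0, SRC_WIDTH - VEC_SIZE); // never read out of bounds

            int vals[VEC_SIZE];
            for(int i = 0; i < VEC_SIZE; ++i)
            {
                vals[i] = src[x + i];
            }

            auto rotate_right = [&vals](int n) // circular right rotation, like the ROTATE() helper
            {
                int tmp[VEC_SIZE];
                for(int i = 0; i < VEC_SIZE; ++i)
                {
                    tmp[(i + n) % VEC_SIZE] = vals[i];
                }
                std::copy(tmp, tmp + VEC_SIZE, vals);
            };
            if(cond_left)
            {
                rotate_right(PAD_X_BEFORE_REMAINDER);
            }
            if(cond_right)
            {
                rotate_right(VEC_SIZE_LEFTOVER_READ % VEC_SIZE);
            }

            // Lanes that fall into the padding get the constant; only in-bounds lanes are stored
            for(int i = 0; i < VEC_SIZE && (t * VEC_SIZE + i) < DST_WIDTH; ++i)
            {
                const int p = t * VEC_SIZE + i;
                dst[p]      = (p < PAD_X_BEFORE || p >= PAD_RIGHT_START) ? CONST_VAL : vals[i];
            }
        }

        for(int v : dst)
        {
            std::printf("%d ", v); // 0 0 10 11 12 13 14 15 16 0 0
        }
        std::printf("\n");
        return 0;
    }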
@@ -232,20 +245,19 @@ __kernel void pad_layer_symmetric_reflect(TENSOR3D_DECLARATION(src),
     ((VEC_TYPE)(*(__global DATA_TYPE *)src_addr), 0, (__global DATA_TYPE *)dst.ptr);
 #else // SRC_WIDTH == 1
 
-    VEC_TYPE src_vals = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr);
+    VEC_TYPE src_vals0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src_addr);
 
     // Choose rearrangement policy based on the defined conditions
-    src_vals = select(src_vals, SYMM_REFL_LEFT(src_vals, PAD_X_BEFORE_REMAINDER, PAD_X_BEFORE_REMAINDER_REFL), SCALAR_COND(is_across_pad_left));
-    src_vals = select(src_vals, SYMM_REFL_RIGHT(src_vals, PAD_X_AFTER_REMAINDER, PAD_X_AFTER_REMAINDER_REFL), SCALAR_COND(is_across_pad_right));
-    src_vals = select(src_vals, REVERSE(src_vals, VEC_SIZE), SCALAR_COND((is_before_pad_left || is_after_pad_right)));
+    src_vals0 = select(src_vals0, SYMM_REFL_LEFT(src_vals0, PAD_X_BEFORE_REMAINDER, PAD_X_BEFORE_REMAINDER_REFL), SCALAR_COND(is_across_pad_left));
+    src_vals0 = select(src_vals0, SYMM_REFL_RIGHT(src_vals0, PAD_X_AFTER_REMAINDER, PAD_X_AFTER_REMAINDER_REFL), SCALAR_COND(is_across_pad_right));
+    src_vals0 = select(src_vals0, REVERSE(src_vals0, VEC_SIZE), SCALAR_COND((is_before_pad_left || is_after_pad_right)));
 #if defined(AFTER_PAD_REM)
-    src_vals = select(src_vals, ROTATE(src_vals, VEC_SIZE, AFTER_PAD_REM), SCALAR_COND(neg_offs));
+    src_vals0 = select(src_vals0, ROTATE(src_vals0, VEC_SIZE, AFTER_PAD_REM), SCALAR_COND(neg_offs));
 #endif // defined(AFTER_PAD_REM)
 
-    // Store
-    VSTORE(VEC_SIZE)
-    (src_vals, 0, (__global DATA_TYPE *)dst.ptr);
+    // Store values in bounds
+    STORE_VECTOR_SELECT(src_vals, DATA_TYPE, dst.ptr, VEC_SIZE, VEC_SIZE_LEFTOVER_WRITE, get_global_id(0) == (get_global_size(0) - 1));
 #endif // SRC_WIDTH == 1
 }
-#endif // defined(PAD_X_BEFORE_REMAINDER) && defined(PAD_X_AFTER_REMAINDER) && defined(PAD_X_BEFORE_REMAINDER_REFL) && defined(PAD_X_AFTER_REMAINDER_REFL) && defined(AFTER_PAD_FACT_X)
-#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(PAD_X_BEFORE) && defined(SRC_WIDTH)
+#endif // defined(IS_REFLECT) && defined(PAD_X_AFTER_REMAINDER) && defined(PAD_X_BEFORE_REMAINDER_REFL) && defined(PAD_X_AFTER_REMAINDER_REFL) && defined(AFTER_PAD_FACT_X)
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(PAD_X_BEFORE) && defined(SRC_WIDTH) && defined(PAD_X_BEFORE_REMAINDER) && defined(VEC_SIZE_LEFTOVER_WRITE)
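Both kernels now finish with STORE_VECTOR_SELECT instead of an unconditional VSTORE, so the last work-item along X writes only the leftover lanes rather than relying on the output buffer being padded past its real width. The following C++ sketch models the intent of such a leftover-aware store; it is an assumption about the behaviour, not the macro's actual OpenCL definition:

    #include <cstdio>

    // Model of a leftover-aware store: every work-item writes vec_size lanes,
    // except the last one along X, which writes only "leftover" lanes.
    static void store_vector_select(int *dst, const int *vals, int vec_size, int leftover, bool is_last)
    {
        const int lanes = is_last ? leftover : vec_size;
        for(int i = 0; i < lanes; ++i)
        {
            dst[i] = vals[i];
        }
    }

    int main()
    {
        const int vec_size  = 4;
        const int row_width = 10;
        // Mirrors vec_size - (ceil_to_multiple(row_width, vec_size) - row_width): 2 here, vec_size if row_width were a multiple
        const int leftover  = (row_width % vec_size == 0) ? vec_size : row_width % vec_size;

        int       row[10] = {};
        const int vals[4] = { 7, 7, 7, 7 };
        const int threads = (row_width + vec_size - 1) / vec_size;
        for(int t = 0; t < threads; ++t)
        {
            store_vector_select(row + t * vec_size, vals, vec_size, leftover, t == threads - 1);
        }
        for(int i = 0; i < row_width; ++i)
        {
            std::printf("%d ", row[i]); // 7 7 7 7 7 7 7 7 7 7, with nothing written past row[9]
        }
        std::printf("\n");
        return 0;
    }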
diff --git a/src/core/CL/kernels/CLPadLayerKernel.cpp b/src/core/CL/kernels/CLPadLayerKernel.cpp
index 485676667c..2f54b390d5 100644
--- a/src/core/CL/kernels/CLPadLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPadLayerKernel.cpp
@@ -39,7 +39,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_UNUSED(constant_value);
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > input->num_dimensions());
+    ARM_COMPUTE_RETURN_ERROR_ON((padding.size() < 1) || (padding.size() > input->num_dimensions()));
     if(mode == PaddingMode::REFLECT || mode == PaddingMode::SYMMETRIC)
     {
         ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 3);
@@ -62,40 +62,10 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
 
     return Status{};
 }
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode,
-                                                        unsigned int &num_elems_processed_per_iteration)
-{
-    ARM_COMPUTE_UNUSED(constant_value, mode);
-
-    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding);
-    auto_init_if_empty(*output, input->clone()->set_tensor_shape(padded_shape));
-
-    num_elems_processed_per_iteration = std::min(16U, 32U / static_cast<unsigned int>(element_size_from_data_type(input->data_type())));
-    if(input->dimension(0) < num_elems_processed_per_iteration)
-    {
-        num_elems_processed_per_iteration = 1 << static_cast<unsigned int>(std::log2(input->dimension(0)));
-    }
-
-    // Configure kernel window
-    Window win = calculate_max_window(*output, Steps(num_elems_processed_per_iteration));
-
-    const int input_start_x = mode == PaddingMode::CONSTANT ? -(padding.at(0).first % num_elems_processed_per_iteration) : 0;
-    const int input_start_y = (mode == PaddingMode::CONSTANT && padding.size() > 1) ? -padding.at(1).first : 0;
-
-    AccessWindowRectangle  input_access(input, input_start_x, input_start_y, num_elems_processed_per_iteration, 1);
-    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-
-    const bool window_changed = update_window_and_padding(win, input_access, output_access);
-    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
-}
 } // namespace
 
 CLPadLayerKernel::CLPadLayerKernel()
-    : _input(nullptr), _output(nullptr), _input_start_x(0), _input_start_y(0), _4d_enabled(false)
+    : _input(nullptr), _output(nullptr), _4d_enabled(false)
 {
 }
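With the AccessWindow bookkeeping removed above, the vector length no longer has to be rounded down to a power of two for narrow tensors; it can simply be capped at the row width because leftovers are handled inside the kernel. A sketch of the difference, with new_vec_size modelled loosely on what adjust_vec_size() is expected to do (assumed behaviour, example values only):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // Old path: round the preferred vector length down to a power of two when the row is short.
    static unsigned int old_vec_size(unsigned int preferred, unsigned int width)
    {
        unsigned int v = preferred;
        if(width < v)
        {
            v = 1u << static_cast<unsigned int>(std::log2(width));
        }
        return v;
    }

    // New path: just cap the vector length at the row width (sketch of adjust_vec_size()).
    static unsigned int new_vec_size(unsigned int preferred, unsigned int width)
    {
        return std::min(preferred, width);
    }

    int main()
    {
        for(unsigned int width : { 3u, 6u, 17u, 64u })
        {
            std::printf("width %2u -> old %u, new %u\n", width, old_vec_size(8u, width), new_vec_size(8u, width));
        }
        return 0;
    }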
@@ -106,40 +76,36 @@ void CLPadLayerKernel::configure(const ICLTensor *input, ICLTensor *output, cons
 
 void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
 {
-    // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(misc::shape_calculator::compute_padded_shape(input->info()->tensor_shape(), padding)));
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding, constant_value, mode));
 
+    auto padding_info = get_padding_info({ input, output });
+
     _input      = input;
     _output     = output;
     _4d_enabled = (mode == PaddingMode::CONSTANT) && (padding.size() > 3);
 
-    // Configure window
-    unsigned int vec_size;
-    auto         win_config = validate_and_configure_window(input->info(), output->info(), padding, constant_value, mode, vec_size);
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    ICLKernel::configure_internal(win_config.second);
-
-    // Set build options
-    std::string kernel_name = "pad_layer_";
-
-    const DataType    &data_type       = input->info()->data_type();
-    const unsigned int input_width     = input->info()->dimension(0);
-    const unsigned int input_height    = input->info()->dimension(1);
-    const unsigned int input_depth     = input->info()->dimension(2);
-    const unsigned int pad_x_before    = padding.at(0).first;
-    const unsigned int pad_y_before    = padding.size() > 1 ? padding.at(1).first : 0;
-    const unsigned int pad_z_before    = padding.size() > 2 ? padding.at(2).first : 0;
-    const unsigned int pad_right_start = input_width + pad_x_before;
-
-    _input_start_x = mode == PaddingMode::CONSTANT ? -(pad_x_before % vec_size) : 0;
-    _input_start_y = (mode == PaddingMode::CONSTANT && padding.size() > 1) ? -padding.at(1).first : 0;
+    const DataType    &data_type               = input->info()->data_type();
+    const unsigned int input_width             = input->info()->dimension(0);
+    const unsigned int input_height            = input->info()->dimension(1);
+    const unsigned int input_depth             = input->info()->dimension(2);
+    const unsigned int pad_x_before            = padding.at(0).first;
+    const unsigned int pad_y_before            = padding.size() > 1 ? padding.at(1).first : 0;
+    const unsigned int pad_z_before            = padding.size() > 2 ? padding.at(2).first : 0;
+    const unsigned int vec_size                = adjust_vec_size(std::min(16U, 32U / static_cast<unsigned int>(element_size_from_data_type(input->info()->data_type()))), input_width);
+    const unsigned int pad_right_start         = input_width + pad_x_before;
+    const unsigned int pad_x_before_remainder  = pad_x_before % vec_size;
+    const unsigned int vec_size_leftover_write = vec_size - (ceil_to_multiple(output->info()->dimension(0), vec_size) - output->info()->dimension(0));
 
     CLBuildOptions build_opts;
     build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
     build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size));
     build_opts.add_option("-DPAD_X_BEFORE=" + support::cpp11::to_string(pad_x_before));
     build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(input_width));
+    build_opts.add_option("-DPAD_X_BEFORE_REMAINDER=" + support::cpp11::to_string(pad_x_before_remainder));
+    build_opts.add_option("-DVEC_SIZE_LEFTOVER_WRITE=" + support::cpp11::to_string(vec_size_leftover_write));
     if(padding.size() > 1)
     {
         build_opts.add_option("-DPAD_Y_BEFORE=" + support::cpp11::to_string(pad_y_before));
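The output tensor is now auto-initialised from compute_padded_shape() before validation runs. The shape arithmetic that call stands for is simply the input extent plus the before/after amounts of each padding entry, as in this small sketch (example values only, not the library's actual helper):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Sketch of the padded-shape computation: each dimension grows by its (before, after) padding.
    int main()
    {
        const std::vector<unsigned int>                          in_shape = { 7, 5, 3 };            // W, H, D
        const std::vector<std::pair<unsigned int, unsigned int>> padding  = { { 2, 1 }, { 0, 3 } }; // (before, after) per dimension

        std::vector<unsigned int> out_shape = in_shape;
        for(std::size_t d = 0; d < padding.size(); ++d)
        {
            out_shape[d] += padding[d].first + padding[d].second;
        }
        std::printf("padded shape: %u x %u x %u\n", out_shape[0], out_shape[1], out_shape[2]); // 10 x 8 x 3
        return 0;
    }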
@@ -152,15 +118,23 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
         }
     }
 
+    std::string kernel_name = "pad_layer_";
     switch(mode)
     {
         case PaddingMode::CONSTANT:
         {
             kernel_name += "constant";
 
+            const unsigned int vec_size_leftover_read = vec_size - (ceil_to_multiple(pad_right_start, vec_size) - pad_right_start);
+
             build_opts.add_option("-DCONST_VAL=" + string_from_pixel_value(constant_value, data_type));
-            build_opts.add_option_if(pad_x_before >= vec_size, "-DNUM_THREADS_TO_SKIP_X=" + support::cpp11::to_string(pad_x_before / vec_size));
+            build_opts.add_option("-DVEC_SIZE_LEFTOVER_READ=" + support::cpp11::to_string(vec_size_leftover_read));
+
+            if(pad_x_before >= vec_size)
+            {
+                build_opts.add_option("-DTHREADS_TO_SKIP_BEFORE=" + support::cpp11::to_string(pad_x_before / vec_size));
+                build_opts.add_option("-DTHREADS_TO_SKIP_AFTER=" + support::cpp11::to_string(pad_right_start / vec_size));
+            }
 
             if(_4d_enabled)
             {
                 build_opts.add_option("-DPAD_W_BEFORE=" + support::cpp11::to_string(padding.at(3).first));
@@ -176,13 +150,11 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
 
             const auto is_reflect = static_cast<unsigned int>(mode == PaddingMode::REFLECT);
 
-            const unsigned int pad_x_before_remainder = pad_x_before % vec_size;
-            const unsigned int pad_x_after_remainder  = pad_right_start % vec_size;
-            const unsigned int after_pad_fact_x       = (2 * input_width + pad_x_before) - is_reflect;
-            const unsigned int output_last_x          = ceil_to_multiple(pad_right_start + padding.at(0).second, vec_size);
+            const unsigned int pad_x_after_remainder = pad_right_start % vec_size;
+            const unsigned int after_pad_fact_x      = (2 * input_width + pad_x_before) - is_reflect;
+            const unsigned int output_last_x         = ceil_to_multiple(pad_right_start + padding.at(0).second, vec_size);
 
             build_opts.add_option("-DIS_REFLECT=" + support::cpp11::to_string(is_reflect));
-            build_opts.add_option("-DPAD_X_BEFORE_REMAINDER=" + support::cpp11::to_string(pad_x_before_remainder));
             build_opts.add_option("-DPAD_X_AFTER_REMAINDER=" + support::cpp11::to_string(pad_x_after_remainder));
             build_opts.add_option("-DPAD_X_BEFORE_REMAINDER_REFL=" + support::cpp11::to_string((pad_x_before_remainder + is_reflect) % vec_size));
             build_opts.add_option("-DPAD_X_AFTER_REMAINDER_REFL=" + support::cpp11::to_string((pad_x_after_remainder - is_reflect) % vec_size));
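For the REFLECT/SYMMETRIC path, the *_REFL remainders differ from the plain remainders only by the is_reflect offset. A quick numeric check of the formulas above, using arbitrary example sizes (not values taken from the patch):

    #include <cstdio>

    // Evaluate the reflect/symmetric remainder constants for one example tensor.
    int main()
    {
        const unsigned int vec_size        = 4;
        const unsigned int src_width       = 7;
        const unsigned int pad_x_before    = 2;
        const unsigned int pad_right_start = src_width + pad_x_before;

        for(unsigned int is_reflect = 0; is_reflect <= 1; ++is_reflect) // 0: SYMMETRIC, 1: REFLECT
        {
            const unsigned int pad_x_before_remainder      = pad_x_before % vec_size;
            const unsigned int pad_x_after_remainder       = pad_right_start % vec_size;
            const unsigned int pad_x_before_remainder_refl = (pad_x_before_remainder + is_reflect) % vec_size;
            const unsigned int pad_x_after_remainder_refl  = (pad_x_after_remainder - is_reflect) % vec_size;
            const unsigned int after_pad_fact_x            = (2 * src_width + pad_x_before) - is_reflect;
            std::printf("is_reflect=%u: before_rem=%u after_rem=%u before_refl=%u after_refl=%u after_pad_fact_x=%u\n",
                        is_reflect, pad_x_before_remainder, pad_x_after_remainder,
                        pad_x_before_remainder_refl, pad_x_after_remainder_refl, after_pad_fact_x);
        }
        return 0;
    }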
@@ -197,14 +169,17 @@ void CLPadLayerKernel::configure(const CLCompileContext &compile_context, const
 
     // Create kernel
     _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
+    // Configure window
+    Window win = calculate_max_window(*output->info(), Steps(vec_size));
+    ICLKernel::configure_internal(win);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
 }
 
 Status CLPadLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding, PixelValue constant_value, PaddingMode mode)
 {
-    unsigned int vec_size;
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding, constant_value, mode));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), padding, constant_value, mode, vec_size).first);
-
     return Status{};
 }
 
@@ -213,25 +188,20 @@ void CLPadLayerKernel::run(const Window &window, cl::CommandQueue &queue)
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
 
-    Window win_in = window;
-    win_in.adjust(Window::DimX, _input_start_x, true);
-    win_in.adjust(Window::DimY, _input_start_y, true);
-
-    Window       slice_out = window.first_slice_window_3D();
-    Window       slice_in  = win_in.first_slice_window_3D();
-    unsigned int batch     = 0;
+    Window       slice = window.first_slice_window_3D();
+    unsigned int batch = 0;
     do
     {
         unsigned int idx = 0;
-        add_3D_tensor_argument(idx, _input, slice_in);
-        add_3D_tensor_argument(idx, _output, slice_out);
+        add_3D_tensor_argument(idx, _input, slice);
+        add_3D_tensor_argument(idx, _output, slice);
         if(_4d_enabled)
         {
             add_argument(idx, batch++);
         }
 
-        enqueue(queue, *this, slice_out, lws_hint());
+        enqueue(queue, *this, slice, lws_hint());
     }
-    while(window.slide_window_slice_3D(slice_out) && win_in.slide_window_slice_3D(slice_in));
+    while(window.slide_window_slice_3D(slice));
 }
 } // namespace arm_compute
diff --git a/src/core/CL/kernels/CLPadLayerKernel.h b/src/core/CL/kernels/CLPadLayerKernel.h
index 2b0abb18df..90af337f94 100644
--- a/src/core/CL/kernels/CLPadLayerKernel.h
+++ b/src/core/CL/kernels/CLPadLayerKernel.h
@@ -88,8 +88,6 @@ public:
 private:
     const ICLTensor *_input;
     ICLTensor       *_output;
-    int              _input_start_x;
-    int              _input_start_y;
     bool             _4d_enabled;
 };
 } // namespace arm_compute
--
cgit v1.2.1