From 0f3d5971491c83afc01a4208eb42858a4a1ae354 Mon Sep 17 00:00:00 2001 From: Manuel Bottini Date: Tue, 5 Jan 2021 11:36:16 +0000 Subject: Remove OpenCL padding CLTransposeKernel By handling more general NxM blocks (where M and N can be 1,2,4,8,16) instead of only 4x4, 8x8, 16x16 and managing corner left values with partial stores Resolves: COMPMID-3923 Change-Id: I49b1a560c8325e00e061bd04edcf55034d04dcd8 Signed-off-by: Manuel Bottini Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4780 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio Reviewed-by: Giorgio Arena Comments-Addressed: Arm Jenkins --- src/core/CL/cl_kernels/transpose.cl | 320 +++++++++++++++--------------- src/core/CL/kernels/CLTransposeKernel.cpp | 58 ++---- src/runtime/CL/functions/CLTranspose.cpp | 7 +- tests/validation/CL/Transpose.cpp | 16 +- 4 files changed, 193 insertions(+), 208 deletions(-) diff --git a/src/core/CL/cl_kernels/transpose.cl b/src/core/CL/cl_kernels/transpose.cl index 785be6c710..832572bf0e 100644 --- a/src/core/CL/cl_kernels/transpose.cl +++ b/src/core/CL/cl_kernels/transpose.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -21,116 +21,93 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ -#include "helpers.h" - -#define SWAP_ROW(u0, l0) \ - ({ \ - tmp_swap = u0; \ - u0 = l0; \ - l0 = tmp_swap; \ - }) - -#define SWAP_4x4(u0, u1, u2, u3, l0, l1, l2, l3) \ - ({ \ - VEC_DATA_TYPE(DATA_TYPE, 4) \ - tmp_swap; \ - SWAP_ROW(u0, l0); \ - SWAP_ROW(u1, l1); \ - SWAP_ROW(u2, l2); \ - SWAP_ROW(u3, l3); \ - }) - -#define SWAP_8x8(u0, u1, u2, u3, u4, u5, u6, u7, l0, l1, l2, l3, l4, l5, l6, l7) \ - ({ \ - VEC_DATA_TYPE(DATA_TYPE, 8) \ - tmp_swap; \ - SWAP_ROW(u0, l0); \ - SWAP_ROW(u1, l1); \ - SWAP_ROW(u2, l2); \ - SWAP_ROW(u3, l3); \ - SWAP_ROW(u4, l4); \ - SWAP_ROW(u5, l5); \ - SWAP_ROW(u6, l6); \ - SWAP_ROW(u7, l7); \ - }) - -#define TRANSPOSE_4x4(u0, u1, u2, u3) \ - ({ \ - VEC_DATA_TYPE(DATA_TYPE, 4) \ - tmp; \ - tmp.s012 = u0.s123; \ - u0.s1 = u1.s0; \ - u0.s2 = u2.s0; \ - u0.s3 = u3.s0; \ - u1.s0 = tmp.s0; \ - u2.s0 = tmp.s1; \ - u3.s0 = tmp.s2; \ - \ - tmp.s01 = u1.s23; \ - u1.s2 = u2.s1; \ - u1.s3 = u3.s1; \ - u2.s1 = tmp.s0; \ - u3.s1 = tmp.s1; \ - \ - tmp.s0 = u2.s3; \ - u2.s3 = u3.s2; \ - u3.s2 = tmp.s0; \ - }) +#define PARTIAL_STORE_M0 VEC_SIZE_LEFTOVER_X +#define PARTIAL_STORE_N0 VEC_SIZE_LEFTOVER_Y -#define TRANSPOSE_8x8(u0, u1, u2, u3, u4, u5, u6, u7) \ - ({ \ - TRANSPOSE_4x4(u0.s0123, u1.s0123, u2.s0123, u3.s0123); \ - TRANSPOSE_4x4(u0.s4567, u1.s4567, u2.s4567, u3.s4567); \ - TRANSPOSE_4x4(u4.s0123, u5.s0123, u6.s0123, u7.s0123); \ - TRANSPOSE_4x4(u4.s4567, u5.s4567, u6.s4567, u7.s4567); \ - SWAP_4x4(u0.s4567, u1.s4567, u2.s4567, u3.s4567, u4.s0123, u5.s0123, u6.s0123, u7.s0123); \ - }) - -#define TRANSPOSE_16x16(u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15) \ - ({ \ - TRANSPOSE_8x8(u0.s01234567, u1.s01234567, u2.s01234567, u3.s01234567, u4.s01234567, u5.s01234567, u6.s01234567, u7.s01234567); \ - TRANSPOSE_8x8(u0.s89ABCDEF, u1.s89ABCDEF, u2.s89ABCDEF, u3.s89ABCDEF, u4.s89ABCDEF, u5.s89ABCDEF, u6.s89ABCDEF, u7.s89ABCDEF); \ - TRANSPOSE_8x8(u8.s01234567, u9.s01234567, u10.s01234567, u11.s01234567, u12.s01234567, u13.s01234567, u14.s01234567, u15.s01234567); \ - TRANSPOSE_8x8(u8.s89ABCDEF, u9.s89ABCDEF, u10.s89ABCDEF, u11.s89ABCDEF, u12.s89ABCDEF, u13.s89ABCDEF, u14.s89ABCDEF, u15.s89ABCDEF); \ - SWAP_8x8(u0.s89ABCDEF, 
u1.s89ABCDEF, u2.s89ABCDEF, u3.s89ABCDEF, u4.s89ABCDEF, u5.s89ABCDEF, u6.s89ABCDEF, u7.s89ABCDEF, \ - u8.s01234567, u9.s01234567, u10.s01234567, u11.s01234567, u12.s01234567, u13.s01234567, u14.s01234567, u15.s01234567); \ - }) +#include "helpers.h" +#include "repeat.h" -#ifndef DATA_TYPE_IN_BYTES -#error DATA_TYPE_IN_BYTES not set for the transpose OpenCL kernel -#endif /* not DATA_TYPE_IN_BYTES */ +#if defined(DATA_TYPE_IN_BYTES) && defined(VEC_SIZE_X) && defined(VEC_SIZE_LEFTOVER_X) && defined(VEC_SIZE_Y) && defined(VEC_SIZE_LEFTOVER_Y) -#undef VLOAD -#undef VSTORE +#if VEC_SIZE_X == 1 +#if VEC_SIZE_Y == 1 +#define TRANSPOSED_U(val) \ + { \ + u0 \ + } +#elif VEC_SIZE_Y == 2 +#define TRANSPOSED_U(val) \ + { \ + u0, u1 \ + } +#elif VEC_SIZE_Y == 4 +#define TRANSPOSED_U(val) \ + { \ + u0, u1, u2, u3 \ + } +#elif VEC_SIZE_Y == 8 +#define TRANSPOSED_U(val) \ + { \ + u0, u1, u2, u3, u4, u5, u6, u7 \ + } +#elif VEC_SIZE_Y == 16 +#define TRANSPOSED_U(val) \ + { \ + u0, u1, u2, u3, u4, u5, u6, u7, \ + u8, u9, u10, u11, u12, u13, u14, u15 \ + } +#endif /* switch VEC_SIZE_Y */ +#else // VEC_SIZE_X == 1 +#if VEC_SIZE_Y == 1 +#define TRANSPOSED_U(val) \ + { \ + u0.val \ + } +#elif VEC_SIZE_Y == 2 +#define TRANSPOSED_U(val) \ + { \ + u0.val, u1.val \ + } +#elif VEC_SIZE_Y == 4 +#define TRANSPOSED_U(val) \ + { \ + u0.val, u1.val, u2.val, u3.val \ + } +#elif VEC_SIZE_Y == 8 +#define TRANSPOSED_U(val) \ + { \ + u0.val, u1.val, u2.val, u3.val, u4.val, u5.val, u6.val, u7.val \ + } +#elif VEC_SIZE_Y == 16 +#define TRANSPOSED_U(val) \ + { \ + u0.val, u1.val, u2.val, u3.val, u4.val, u5.val, u6.val, u7.val, \ + u8.val, u9.val, u10.val, u11.val, u12.val, u13.val, u14.val, u15.val \ + } +#endif /* switch VEC_SIZE_Y */ +#endif // VEC_SIZE_X == 1 #if DATA_TYPE_IN_BYTES == 4 #define DATA_TYPE uint -#define TRANSPOSE() TRANSPOSE_4x4(u0, u1, u2, u3) -#define VLOAD(x, y) vload4(x, y) -#define VSTORE(x, y, z) vstore4(x, y, z) -#define BLOCK_SIZE 4 #elif DATA_TYPE_IN_BYTES == 2 #define DATA_TYPE ushort -#define TRANSPOSE() TRANSPOSE_8x8(u0, u1, u2, u3, u4, u5, u6, u7) -#define VLOAD(x, y) vload8(x, y) -#define VSTORE(x, y, z) vstore8(x, y, z) -#define BLOCK_SIZE 8 #elif DATA_TYPE_IN_BYTES == 1 #define DATA_TYPE uchar -#define TRANSPOSE() TRANSPOSE_16x16(u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15) -#define VLOAD(x, y) vload16(x, y) -#define VSTORE(x, y, z) vstore16(x, y, z) -#define BLOCK_SIZE 16 #else /* switch DATA_TYPE_IN_BYTES */ #error DATA_TYPE_IN_BYTES not supported for transpose #endif /* switch DATA_TYPE_IN_BYTES */ /** This OpenCL kernel computes the matrix transposition of input matrix * - * @attention The number of bytes of the data type need to be passed at compile time using -DDATA_TYPE_IN_BYTES. DATA_TYPE_IN_BYTES can be: + * @note The number of bytes of the data type need to be passed at compile time using -DDATA_TYPE_IN_BYTES. DATA_TYPE_IN_BYTES can be: * -# -DDATA_TYPE_IN_BYTES=1 for transposing U8 or S8 matrices * -# -DDATA_TYPE_IN_BYTES=2 for transposing U16, S16 or FP16 matrices * -# -DDATA_TYPE_IN_BYTES=4 for transposing U32, S32 or FP32 matrices + * -# -DVEC_SIZE_X is the number of elements processed in X dimension + * -# -DVEC_SIZE_LEFTOVER_X is the leftover size in the X dimension; x_dimension % VEC_SIZE_X + * -# -DVEC_SIZE_Y is the number of elements processed in Y dimension + * -# -DVEC_SIZE_LEFTOVER_Y is the leftover size in the Y dimension; y_dimension % VEC_SIZE_Y + * * * @param[in] src_ptr Pointer to the source matrix. 
Supported data types: All * @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes) @@ -148,73 +125,104 @@ __kernel void transpose(IMAGE_DECLARATION(src), IMAGE_DECLARATION(dst)) { - uint x = get_global_id(0) * BLOCK_SIZE; - uint y = get_global_id(1) * BLOCK_SIZE; + uint x_offs = max((int)(get_global_id(0) * VEC_SIZE_X - (VEC_SIZE_X - VEC_SIZE_LEFTOVER_X) % VEC_SIZE_X), 0); + uint y_offs = max((int)(get_global_id(1) * VEC_SIZE_Y - (VEC_SIZE_Y - VEC_SIZE_LEFTOVER_Y) % VEC_SIZE_Y), 0); - // Compute source address - Image src = CONVERT_TO_IMAGE_STRUCT(src); + // Compute addresses + __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * DATA_TYPE_IN_BYTES + y_offs * src_stride_y; + __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + y_offs * DATA_TYPE_IN_BYTES + x_offs * dst_stride_y; - // Load the NxN block at (x, y) - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u0 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 0))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u1 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 1))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u2 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 2))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u3 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 3))); -#if BLOCK_SIZE > 4 - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u4 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 4))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u5 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 5))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u6 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 6))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u7 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 7))); -#if BLOCK_SIZE == 16 - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u8 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 8))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u9 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 9))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u10 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 10))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u11 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 11))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u12 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 12))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u13 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 13))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u14 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 14))); - VEC_DATA_TYPE(DATA_TYPE, BLOCK_SIZE) - u15 = VLOAD(0, (__global DATA_TYPE *)(offset(&src, 0, 15))); -#endif /* BLOCK_SIZE == 16 */ -#endif /* BLOCK_SIZE > 4 */ + // Load the NxM block at (x, y) + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u0 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)src_addr); +#if VEC_SIZE_Y > 1 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u1 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + src_stride_y)); +#endif /* VEC_SIZE_Y > 1 */ +#if VEC_SIZE_Y > 2 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u2 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u3 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y)); +#endif /* VEC_SIZE_Y > 2 */ +#if VEC_SIZE_Y > 4 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u4 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u5 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 5 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u6 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE 
*)(src_addr + 6 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u7 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 7 * src_stride_y)); +#endif /* VEC_SIZE_Y > 4 */ +#if VEC_SIZE_Y > 8 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u8 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 8 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u9 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 9 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u10 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 10 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u11 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 11 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u12 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 12 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u13 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 13 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u14 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 14 * src_stride_y)); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_X) + u15 = VLOAD(VEC_SIZE_X)(0, (__global DATA_TYPE *)(src_addr + 15 * src_stride_y)); +#endif /* VEC_SIZE_Y > 8 */ - // Transpose the block - TRANSPOSE(); + //Create transposed vectors + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t0 = TRANSPOSED_U(s0); +#if VEC_SIZE_X > 1 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t1 = TRANSPOSED_U(s1); +#endif /* VEC_SIZE_X > 1 */ +#if VEC_SIZE_X > 2 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t2 = TRANSPOSED_U(s2); +#endif /* VEC_SIZE_X > 2 */ +#if VEC_SIZE_X > 3 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t3 = TRANSPOSED_U(s3); +#endif /* VEC_SIZE_X > 3 */ +#if VEC_SIZE_X > 4 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t4 = TRANSPOSED_U(s4); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t5 = TRANSPOSED_U(s5); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t6 = TRANSPOSED_U(s6); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t7 = TRANSPOSED_U(s7); +#endif /* VEC_SIZE_X > 4 */ +#if VEC_SIZE_X > 8 + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t8 = TRANSPOSED_U(s8); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + t9 = TRANSPOSED_U(s9); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + tA = TRANSPOSED_U(sA); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + tB = TRANSPOSED_U(sB); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + tC = TRANSPOSED_U(sC); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + tD = TRANSPOSED_U(sD); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + tE = TRANSPOSED_U(sE); + VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE_Y) + tF = TRANSPOSED_U(sF); +#endif /* VEC_SIZE_X > 8 */ // Store the block at (y, x) - uint dst_offset_in_bytes = y * DATA_TYPE_IN_BYTES + x * dst_stride_y + dst_offset_first_element_in_bytes; - VSTORE(u0, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 0 * dst_stride_y)); - VSTORE(u1, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 1 * dst_stride_y)); - VSTORE(u2, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 2 * dst_stride_y)); - VSTORE(u3, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 3 * dst_stride_y)); -#if BLOCK_SIZE > 4 - VSTORE(u4, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 4 * dst_stride_y)); - VSTORE(u5, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 5 * dst_stride_y)); - VSTORE(u6, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 6 * dst_stride_y)); - VSTORE(u7, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 7 * dst_stride_y)); -#if BLOCK_SIZE == 16 - VSTORE(u8, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 8 * dst_stride_y)); - 
VSTORE(u9, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 9 * dst_stride_y)); - VSTORE(u10, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 10 * dst_stride_y)); - VSTORE(u11, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 11 * dst_stride_y)); - VSTORE(u12, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 12 * dst_stride_y)); - VSTORE(u13, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 13 * dst_stride_y)); - VSTORE(u14, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 14 * dst_stride_y)); - VSTORE(u15, 0, (__global DATA_TYPE *)(dst_ptr + dst_offset_in_bytes + 15 * dst_stride_y)); -#endif /* BLOCK_SIZE == 16 */ -#endif /* BLOCK_SIZE > 4 */ + REPEAT_VAR_INIT_TO_CONST(VEC_SIZE_X, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0; + STORE_BLOCK_BOUNDARY_AWARE(VEC_SIZE_X, VEC_SIZE_Y, DATA_TYPE, t, (__global uchar *)dst_addr, dst_stride_y, zout, VEC_SIZE_LEFTOVER_X, VEC_SIZE_LEFTOVER_Y, VEC_SIZE_LEFTOVER_X != 0 + && get_global_id(0) == 0, + VEC_SIZE_LEFTOVER_Y != 0 && get_global_id(1) == 0); } + +#endif // defined(DATA_TYPE_IN_BYTES) && defined(VEC_SIZE_X) && defined(VEC_SIZE_LEFTOVER_X) && defined(VEC_SIZE_Y) && defined(VEC_SIZE_LEFTOVER_Y) \ No newline at end of file diff --git a/src/core/CL/kernels/CLTransposeKernel.cpp b/src/core/CL/kernels/CLTransposeKernel.cpp index 8d967e901f..56ff48be1f 100644 --- a/src/core/CL/kernels/CLTransposeKernel.cpp +++ b/src/core/CL/kernels/CLTransposeKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -30,15 +30,10 @@ #include "arm_compute/core/Helpers.h" #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" -#include "src/core/AccessWindowStatic.h" -#include "src/core/AccessWindowTranspose.h" #include "src/core/CL/CLValidate.h" #include "src/core/helpers/AutoConfiguration.h" #include "src/core/helpers/WindowHelpers.h" - -#include -#include -#include +#include "support/StringSupport.h" namespace arm_compute { @@ -72,37 +67,12 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) return Status{}; } - -std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *output) -{ - // Configure kernel window - const unsigned int num_elems_processed_per_iteration = max_cl_vector_width / input->element_size(); - - Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration, num_elems_processed_per_iteration)); - - AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration, num_elems_processed_per_iteration); - - bool window_changed = update_window_and_padding(win, input_access); - - if(output->total_size() != 0) - { - AccessWindowTranspose output_access(output, 0, 0, num_elems_processed_per_iteration, num_elems_processed_per_iteration); - - window_changed = window_changed || update_window_and_padding(win, output_access); - - output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape())); - } - - Status err = (window_changed) ? 
ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; - return std::make_pair(err, win); -} } // namespace Status CLTransposeKernel::validate(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first); return Status{}; } @@ -119,20 +89,28 @@ void CLTransposeKernel::configure(const CLCompileContext &compile_context, const auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(transposed_tensor_shape(input->info()->tensor_shape()))); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info())); + auto padding_info = get_padding_info({ input, output }); _input = input; _output = output; - std::set build_opts; - std::ostringstream data_type_in_bytes; - data_type_in_bytes << input->info()->element_size(); - build_opts.emplace("-DDATA_TYPE_IN_BYTES=" + data_type_in_bytes.str()); + const unsigned int vec_size_x = adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(0)); + const int vec_size_x_leftovers = input->info()->dimension(0) % vec_size_x; + const unsigned int vec_size_y = adjust_vec_size(max_cl_vector_width / input->info()->element_size(), input->info()->dimension(1)); + const int vec_size_y_leftovers = input->info()->dimension(1) % vec_size_y; + + CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE_IN_BYTES=" + support::cpp11::to_string(input->info()->element_size())); + build_opts.add_option("-DVEC_SIZE_X=" + support::cpp11::to_string(vec_size_x)); + build_opts.add_option("-DVEC_SIZE_LEFTOVER_X=" + support::cpp11::to_string(vec_size_x_leftovers)); + build_opts.add_option("-DVEC_SIZE_Y=" + support::cpp11::to_string(vec_size_y)); + build_opts.add_option("-DVEC_SIZE_LEFTOVER_Y=" + support::cpp11::to_string(vec_size_y_leftovers)); - _kernel = create_kernel(compile_context, "transpose", build_opts); + _kernel = create_kernel(compile_context, "transpose", build_opts.options()); // Configure kernel window - auto win_config = validate_and_configure_window(input->info(), output->info()); - ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - ICLKernel::configure_internal(win_config.second, cl::NDRange(2, 8)); + Window win = calculate_max_window(*input->info(), Steps(vec_size_x, vec_size_y)); + ICLKernel::configure_internal(win, cl::NDRange(2, 8)); + ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info)); } } // namespace arm_compute \ No newline at end of file diff --git a/src/runtime/CL/functions/CLTranspose.cpp b/src/runtime/CL/functions/CLTranspose.cpp index c74503f4c0..67151be9a1 100644 --- a/src/runtime/CL/functions/CLTranspose.cpp +++ b/src/runtime/CL/functions/CLTranspose.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,8 +27,8 @@ #include -using namespace arm_compute; - +namespace arm_compute +{ void CLTranspose::configure(const ICLTensor *input, ICLTensor *output) { configure(CLKernelLibrary::get().get_compile_context(), input, output); @@ -45,3 +45,4 @@ Status CLTranspose::validate(const ITensorInfo *input, const ITensorInfo *output { return CLTransposeKernel::validate(input, output); } +} // namespace arm_compute \ No newline at end of file diff --git a/tests/validation/CL/Transpose.cpp b/tests/validation/CL/Transpose.cpp index 876bf29dd5..943534058b 100644 --- a/tests/validation/CL/Transpose.cpp +++ b/tests/validation/CL/Transpose.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -47,19 +47,15 @@ TEST_SUITE(Transpose) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::U8), // Input not a multiple of 8 - TensorInfo(TensorShape(21U, 13U), 1, DataType::U16), // Invalid shape - TensorInfo(TensorShape(20U, 13U), 1, DataType::U32), // Window shrink + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(21U, 13U), 1, DataType::U16), // Invalid shape TensorInfo(TensorShape(20U, 13U), 1, DataType::U8), // Wrong data type TensorInfo(TensorShape(20U, 16U), 1, DataType::U32), // Valid }), - framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(13U, 21U), 1, DataType::U8), - TensorInfo(TensorShape(21U, 13U), 1, DataType::U16), - TensorInfo(TensorShape(13U, 20U), 1, DataType::U32), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(13U, 20U), 1, DataType::U32), TensorInfo(TensorShape(31U, 20U), 1, DataType::U16), TensorInfo(TensorShape(16U, 20U), 1, DataType::U32), })), - framework::dataset::make("Expected", { false, false, false, false, true })), + framework::dataset::make("Expected", { false, false, true })), a_info, output_info, expected) { // Lock tensors @@ -102,7 +98,9 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLTransposeFixture, framework::Datase TEST_SUITE_END() // U16 TEST_SUITE(U32) -FIXTURE_DATA_TEST_CASE(RunSmall, CLTransposeFixture, framework::DatasetMode::PRECOMMIT, combine(concat(datasets::Small1DShapes(), datasets::Small2DShapes()), +FIXTURE_DATA_TEST_CASE(RunSmall, CLTransposeFixture, framework::DatasetMode::PRECOMMIT, combine(concat(concat(framework::dataset::make("Shape", { TensorShape{ 1U, 5U }, TensorShape{ 4U, 5U }, TensorShape{ 3, 12 } }), + datasets::Small1DShapes()), + datasets::Small2DShapes()), framework::dataset::make("DataType", DataType::U32))) { // Validate output -- cgit v1.2.1
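
Note on the indexing scheme used above: the padding removal works because each work-item still issues full VEC_SIZE-wide loads, but every block after the first along a dimension is shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE so the access stays inside the tensor, and only the first block is written with a partial, leftover-sized store (the `VEC_SIZE_LEFTOVER_X != 0 && get_global_id(0) == 0` condition passed to STORE_BLOCK_BOUNDARY_AWARE). Below is a minimal host-side C sketch of that indexing, assuming a 21-element row and a 16-wide vector; the helper names block_offset/store_count are illustrative only and are not part of the library.

    #include <stdio.h>

    /* Offset of the block handled by work-item `gid`, mirroring the kernel's
     * x_offs/y_offs computation: blocks after the first are shifted back so a
     * full VEC_SIZE-wide access still lands inside the tensor. */
    static int block_offset(int gid, int vec_size, int leftover)
    {
        int offs = gid * vec_size - (vec_size - leftover) % vec_size;
        return offs > 0 ? offs : 0;
    }

    /* Number of elements actually stored by work-item `gid`: only the first
     * block uses a partial (leftover-sized) store. */
    static int store_count(int gid, int vec_size, int leftover)
    {
        return (leftover != 0 && gid == 0) ? leftover : vec_size;
    }

    int main(void)
    {
        const int dim      = 21;                          /* e.g. a 21-element row of a U8 tensor */
        const int vec_size = 16;                          /* VEC_SIZE_X chosen for 1-byte elements */
        const int leftover = dim % vec_size;              /* VEC_SIZE_LEFTOVER_X = 5 */
        const int n_items  = (dim + vec_size - 1) / vec_size; /* launched work-items along X = 2 */

        for(int gid = 0; gid < n_items; ++gid)
        {
            const int offs = block_offset(gid, vec_size, leftover);
            printf("work-item %d: loads %d elements at offset %d, stores elements [%d, %d)\n",
                   gid, vec_size, offs, offs, offs + store_count(gid, vec_size, leftover));
        }
        /* Prints:
         *   work-item 0: loads 16 elements at offset 0, stores elements [0, 5)
         *   work-item 1: loads 16 elements at offset 5, stores elements [5, 21)
         * so no element beyond index 20 is ever read or written and no padding is needed. */
        return 0;
    }

Overlapping the first two blocks this way trades a small amount of redundant loading for dropping the implicit tensor padding entirely, which is what lets validate() stop calling the removed validate_and_configure_window() and lets configure() assert that the padding did not change.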