From 7075fe2c5ee6f7cfe7cfd9454d905235e70b9ac4 Mon Sep 17 00:00:00 2001
From: Adnan AlSinan
Date: Mon, 5 Jul 2021 13:12:52 +0100
Subject: Reorganize the kernels into nhwc, nchw and common folders

The following kernels have been split into nchw/nhwc kernel files:
- batchnormalization_layer
- batch_to_space
- channel_shuffle
- depth_to_space
- dequantization_layer
- im2col
- normalization_layer
- normalize_planar_yuv_layer
- normalize_planar_yuv_layer_quantized
- pooling_layer
- pooling_layer_quantized
- remap
- reorg_layer
- scale
- scale_quantized
- space_to_batch
- space_to_depth
- upsample_layer
- winograd_filter_transform
- winograd_input_transform
- winograd_output_transform

The following kernels have been moved to the nchw folder:
- direct_convolution1x1
- direct_convolution3x3
- direct_convolution5x5
- direct_convolution_quantized
- prior_box_layer

The following kernels have been moved to the nhwc folder:
- direct_convolution
- dwc_native_fp_nhwc
- dwc_native_quantized_nhwc

The following kernels have been removed:
- sobel_filter

The remaining kernels have been moved to the common folder.

Partially resolves COMPMID-4453

Signed-off-by: Adnan AlSinan
Change-Id: Ic327ac935687ec351c610c65a3c6357f364a5a58
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5919
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 src/core/CL/cl_kernels/nhwc/channel_shuffle.cl | 160 +++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 src/core/CL/cl_kernels/nhwc/channel_shuffle.cl
(limited to 'src/core/CL/cl_kernels/nhwc/channel_shuffle.cl')

diff --git a/src/core/CL/cl_kernels/nhwc/channel_shuffle.cl b/src/core/CL/cl_kernels/nhwc/channel_shuffle.cl
new file mode 100644
index 0000000000..233beb3aa9
--- /dev/null
+++ b/src/core/CL/cl_kernels/nhwc/channel_shuffle.cl
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+#include "tile_helpers.h"
+
+#if defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
+
+// Check valid VEC_SIZES
+#if VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
+#error "Only vector sizes 1, 2, 3, 4, 8 and 16 are supported"
+#endif // VEC_SIZE != 1 && VEC_SIZE != 2 && VEC_SIZE != 3 && VEC_SIZE != 4 && VEC_SIZE != 8 && VEC_SIZE != 16
+
+#define DIV_MOD_UINT(x, y, div_res, mod_res)                    \
+    ({                                                          \
+        div_res = (uint)((x) * (float)(1.0f / (float)(y)));     \
+        uint r  = div_res * (y);                                \
+        mod_res = (x)-r;                                        \
+    })
+
+#if defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_DIM_X)
+
+/** Performs channel shuffle when the data layout is NHWC. See https://arxiv.org/pdf/1707.01083.pdf for details.
+ *
+ * @note The vector size must be given as a preprocessor argument using -DVEC_SIZE=num. e.g. -DVEC_SIZE=4
+ * @note The third dimension of the tensor must be given as a preprocessor argument using -DSRC_DIM_Z=num. e.g. -DSRC_DIM_Z=64
+ * @note The first dimension of the tensor must be given as a preprocessor argument using -DSRC_DIM_X=num. e.g. -DSRC_DIM_X=64
+ * @note The number of groups must be given as a preprocessor argument using -DNUM_GROUPS=num_groups. e.g. -DNUM_GROUPS=2
+ * @note The number of channels in each group must be given as a preprocessor argument using -DK=num. e.g. -DK=1
+ *       K is equal to num_channels / num_groups.
+ * @note The leftover size in the X dimension should be given as a preprocessor argument using -DVEC_SIZE_LEFTOVER, i.e. x_dimension % VEC_SIZE. e.g. -DVEC_SIZE_LEFTOVER=1
+ *
+ * @param[in]  src_ptr                           Pointer to the source tensor. Supported data types: All
+ * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  src_stride_w                      Stride of the source tensor in W dimension (in bytes)
+ * @param[in]  src_step_w                        src_stride_w * number of elements along W processed per workitem (in bytes)
+ * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                        dst_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  dst_stride_w                      Stride of the destination tensor in W dimension (in bytes)
+ * @param[in]  dst_step_w                        dst_stride_w * number of elements along W processed per workitem (in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void channel_shuffle_nhwc(TENSOR4D_DECLARATION(src),
+                                   TENSOR4D_DECLARATION(dst))
+{
+    // Offset computation
+    const uint curr_out_channel = GET_SPATIAL_IDX(0, VEC_SIZE, VEC_SIZE_LEFTOVER); // output feature map
+
+    uint z        = 0;
+    uint batch_id = 0;
+    // Compute curr_channel and batch_id
+    DIV_MOD_UINT(get_global_id(2), (uint)SRC_DIM_Z, batch_id, z);
+
+    VEC_DATA_TYPE(uint, VEC_SIZE)
+    curr_out_channels = (VEC_DATA_TYPE(uint, VEC_SIZE))(curr_out_channel) + VEC_OFFS(uint, VEC_SIZE);
+
+    VEC_DATA_TYPE(uint, VEC_SIZE)
+    in_channels = (curr_out_channels * (VEC_DATA_TYPE(uint, VEC_SIZE))(K)) % (VEC_DATA_TYPE(uint, VEC_SIZE))(SRC_DIM_X) + (curr_out_channels / (VEC_DATA_TYPE(uint, VEC_SIZE))(NUM_GROUPS));
+
+    // Load the values
+    const __global DATA_TYPE *input_ptr = (const __global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * src_stride_y + z * src_stride_z + batch_id * src_stride_w);
+
+#if VEC_SIZE == 1
+    DATA_TYPE out0 = *((const __global DATA_TYPE *)(input_ptr) + in_channels);
+#elif VEC_SIZE == 2
+    VEC_DATA_TYPE(DATA_TYPE, 2)
+    out0 =
+    {
+        *(input_ptr + in_channels.s0),
+        *(input_ptr + in_channels.s1)
+    };
+#elif VEC_SIZE == 3
+    VEC_DATA_TYPE(DATA_TYPE, 3)
+    out0 =
+    {
+        *(input_ptr + in_channels.s0),
+        *(input_ptr + in_channels.s1),
+        *(input_ptr + in_channels.s2)
+    };
+#elif VEC_SIZE == 4
+    VEC_DATA_TYPE(DATA_TYPE, 4)
+    out0 =
+    {
+        *(input_ptr + in_channels.s0),
+        *(input_ptr + in_channels.s1),
+        *(input_ptr + in_channels.s2),
+        *(input_ptr + in_channels.s3)
+    };
+#elif VEC_SIZE == 8
+    VEC_DATA_TYPE(DATA_TYPE, 8)
+    out0 =
+    {
+        *(input_ptr + in_channels.s0),
+        *(input_ptr + in_channels.s1),
+        *(input_ptr + in_channels.s2),
+        *(input_ptr + in_channels.s3),
+        *(input_ptr + in_channels.s4),
+        *(input_ptr + in_channels.s5),
+        *(input_ptr + in_channels.s6),
+        *(input_ptr + in_channels.s7)
+    };
+#elif VEC_SIZE == 16
+    VEC_DATA_TYPE(DATA_TYPE, 16)
+    out0 =
+    {
+        *(input_ptr + in_channels.s0),
+        *(input_ptr + in_channels.s1),
+        *(input_ptr + in_channels.s2),
+        *(input_ptr + in_channels.s3),
+        *(input_ptr + in_channels.s4),
+        *(input_ptr + in_channels.s5),
+        *(input_ptr + in_channels.s6),
+        *(input_ptr + in_channels.s7),
+        *(input_ptr + in_channels.s8),
+        *(input_ptr + in_channels.s9),
+        *(input_ptr + in_channels.sa),
+        *(input_ptr + in_channels.sb),
+        *(input_ptr + in_channels.sc),
+        *(input_ptr + in_channels.sd),
+        *(input_ptr + in_channels.se),
+        *(input_ptr + in_channels.sf)
+    };
+#endif // VEC_SIZE == 1
+
+    __global uchar *output_ptr = dst_ptr + curr_out_channel * sizeof(DATA_TYPE) + dst_offset_first_element_in_bytes + get_global_id(1) * dst_stride_y + z * dst_stride_z + batch_id * dst_stride_w;
+    STORE_VECTOR_SELECT(out, DATA_TYPE, output_ptr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
+}
+#endif // defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) && defined(SRC_DIM_X)
+#endif // defined(DATA_TYPE) && defined(VEC_SIZE) && defined(NUM_GROUPS) && defined(K) && defined(SRC_DIM_Z)
\ No newline at end of file
-- 
cgit v1.2.1
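
Editor's note on the build-time configuration documented in the kernel header: channel_shuffle_nhwc is compiled with -DDATA_TYPE, -DVEC_SIZE, -DVEC_SIZE_LEFTOVER, -DNUM_GROUPS, -DK, -DSRC_DIM_X and -DSRC_DIM_Z. The sketch below is a hypothetical host-side helper in plain C++, not Compute Library's actual host code; the function name and the example shape are invented. It only shows how those options could be derived from an NHWC tensor, where dimension X is the channel count and dimension Z is the height.

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical helper: derive the -D options expected by channel_shuffle_nhwc.
// In NHWC, dim X = channels and dim Z = height; K = channels / num_groups;
// VEC_SIZE_LEFTOVER = channels % VEC_SIZE (the partial vector handled by the final store).
std::string channel_shuffle_nhwc_build_options(unsigned int channels,
                                               unsigned int height,
                                               unsigned int num_groups,
                                               unsigned int vec_size,
                                               const std::string &data_type)
{
    const unsigned int k        = channels / num_groups;
    const unsigned int leftover = channels % vec_size;

    std::ostringstream opts;
    opts << "-DDATA_TYPE=" << data_type
         << " -DVEC_SIZE=" << vec_size
         << " -DVEC_SIZE_LEFTOVER=" << leftover
         << " -DNUM_GROUPS=" << num_groups
         << " -DK=" << k
         << " -DSRC_DIM_X=" << channels
         << " -DSRC_DIM_Z=" << height;
    return opts.str();
}

int main()
{
    // Example: 66 channels shuffled in 3 groups, uchar data, 4-wide vectors, height 32.
    std::cout << channel_shuffle_nhwc_build_options(66, 32, 3, 4, "uchar") << "\n";
    // Prints: -DDATA_TYPE=uchar -DVEC_SIZE=4 -DVEC_SIZE_LEFTOVER=2 -DNUM_GROUPS=3 -DK=22 -DSRC_DIM_X=66 -DSRC_DIM_Z=32
    return 0;
}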
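
The index arithmetic in the kernel body is the whole shuffle: for each output channel c it loads input channel (c * K) % SRC_DIM_X + c / NUM_GROUPS, vectorized over VEC_SIZE consecutive output channels. The standalone C++ sketch below (illustrative only, not part of the patch; the group and channel counts are arbitrary test values) checks that this closed form is the usual channel-shuffle permutation, i.e. a transpose of a NUM_GROUPS x K view of the channel dimension.

#include <cassert>
#include <cstdio>

int main()
{
    const unsigned int num_groups = 4;              // arbitrary example
    const unsigned int k          = 8;              // channels per group (K in the kernel)
    const unsigned int channels   = num_groups * k; // SRC_DIM_X in the kernel

    for (unsigned int out_c = 0; out_c < channels; ++out_c)
    {
        // Closed form used by channel_shuffle_nhwc for each output channel.
        const unsigned int in_kernel = (out_c * k) % channels + out_c / num_groups;

        // Reference: view the channels as a [num_groups][k] matrix and transpose it.
        // Output channel out_c = k_idx * num_groups + g reads input channel g * k + k_idx.
        const unsigned int g      = out_c % num_groups;
        const unsigned int k_idx  = out_c / num_groups;
        const unsigned int in_ref = g * k + k_idx;

        assert(in_kernel == in_ref);
    }
    std::printf("closed-form index matches the transpose definition for all %u channels\n", channels);
    return 0;
}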
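
DIV_MOD_UINT replaces the integer division and modulo used to split get_global_id(2) into batch index and z with a multiply by a float reciprocal, which is generally cheaper on GPU but only reliable while the operands are small enough that single-precision rounding cannot perturb the quotient. The C++ transcription below is an illustration, not library code, and the tested ranges are arbitrary; it simply compares the trick against the exact operators.

#include <cstdint>
#include <cstdio>

// Host-side transcription of the kernel's DIV_MOD_UINT macro: the quotient comes from
// multiplying by a precomputed float reciprocal, and the remainder is recovered from it.
static void div_mod_uint(uint32_t x, uint32_t y, uint32_t &div_res, uint32_t &mod_res)
{
    div_res = static_cast<uint32_t>(static_cast<float>(x) * (1.0f / static_cast<float>(y)));
    mod_res = x - div_res * y;
}

int main()
{
    // Ranges chosen to resemble the kernel's operands (z collapsed with batches, and SRC_DIM_Z).
    for (uint32_t y = 1; y <= 256; ++y)
    {
        for (uint32_t x = 0; x < 4096; ++x)
        {
            uint32_t d = 0, m = 0;
            div_mod_uint(x, y, d, m);
            if (d != x / y || m != x % y)
            {
                std::printf("mismatch: x=%u y=%u got q=%u r=%u\n", x, y, d, m);
                return 1;
            }
        }
    }
    std::printf("float-reciprocal div/mod agreed with integer div/mod over the tested range\n");
    return 0;
}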