From 3d319469e5f28066c507e4228dfeb6b9fdfb38a5 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Thu, 21 Jun 2018 15:13:17 +0100
Subject: COMPMID-807: NHWC support in CLDirectConvolution.

Change-Id: I8738aca2cc0104e4c4d7c9605762ab59fce10a33
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137333
Reviewed-by: Giorgio Arena
Reviewed-by: Anthony Barbier
Tested-by: Jenkins
---
 src/core/CL/CLKernelLibrary.cpp                    |   3 +
 src/core/CL/cl_kernels/direct_convolution1x1.cl    | 137 ++++++++++-
 src/core/CL/cl_kernels/direct_convolution3x3.cl    | 227 ++++++++++++++++--
 src/core/CL/cl_kernels/direct_convolution5x5.cl    | 234 +++++++++++++++++--
 .../CL/kernels/CLDirectConvolutionLayerKernel.cpp  | 253 ++++++++++++++++-----
 5 files changed, 733 insertions(+), 121 deletions(-)

(limited to 'src/core/CL')

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index ba6a629e0d..475352456c 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -213,10 +213,13 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "derivative", "derivative.cl" },
     { "dilate", "dilate.cl" },
     { "direct_convolution1x1", "direct_convolution1x1.cl" },
+    { "direct_convolution1x1_nhwc", "direct_convolution1x1.cl" },
     { "direct_convolution1x1_f32_bifrost", "direct_convolution1x1.cl" },
     { "direct_convolution3x3", "direct_convolution3x3.cl" },
+    { "direct_convolution3x3_nhwc", "direct_convolution3x3.cl" },
     { "direct_convolution3x3_f32_bifrost", "direct_convolution3x3.cl" },
     { "direct_convolution5x5", "direct_convolution5x5.cl" },
+    { "direct_convolution5x5_nhwc", "direct_convolution5x5.cl" },
     { "direct_convolution5x5_f32_bifrost", "direct_convolution5x5.cl" },
     { "direct_convolution_1x1_3x3_5x5_quantized", "direct_convolution_1x1_3x3_5x5_quantized.cl" },
     { "erode", "erode.cl" },
diff --git a/src/core/CL/cl_kernels/direct_convolution1x1.cl b/src/core/CL/cl_kernels/direct_convolution1x1.cl
index 7a308c99e2..cceeb0f9c4 100644
--- a/src/core/CL/cl_kernels/direct_convolution1x1.cl
+++ b/src/core/CL/cl_kernels/direct_convolution1x1.cl
@@ -31,6 +31,122 @@
 #if defined(DATA_TYPE) && defined(DATA_SIZE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
 
+#if defined(DATA_LAYOUT_NHWC)
+
+#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
+
+/** This kernel performs a direct convolution to convolve the low three dimensions of a tensor with data layout NHWC
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
+ * @note The convolution stride x must be passed at compile time using -DSTRIDE_X e.g. -DSTRIDE_X=1
+ * @note The third dimension of the weights tensor must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note If biases are to be added to the convolution, -DHAS_BIAS has to be passed at compile time
+ *
+ * @param[in]  src_ptr                               Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in]  src_stride_x                          Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                            src_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  src_stride_y                          Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                            src_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                            src_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  src_offset_first_element_in_bytes     The offset of the first element in the source tensor
+ * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
+ * @param[in]  weights_ptr                           Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in]  weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
+ * @param[in]  weights_step_x                        weights_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in]  weights_step_y                        weights_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in]  weights_step_z                        weights_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in]  biases_ptr                            Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in]  biases_stride_x                       Stride of the biases tensor in X dimension (in bytes)
+ * @param[in]  biases_step_x                         biases_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  biases_offset_first_element_in_bytes  The offset of the first element in the biases tensor
+ * @param[in]  weights_stride_w                      Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution1x1_nhwc(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+    VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+    unsigned int weights_stride_w)
+{
+    Image    src     = CONVERT_TO_IMAGE_STRUCT(src);
+    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
+    Tensor3D dst     = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+#ifdef HAS_BIAS
+    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+#endif /* defined(HAS_BIAS) */
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
+    values = 0;
+    const int id0 = get_global_id(0);
+    const int id1 = get_global_id(1);
+    const int id2 = get_global_id(2);
+    weights.ptr += id0 * weights_stride_w;
+    __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + id2 * STRIDE_Y * (int)src_stride_z;
+
+    for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
+    {
+        DATA_TYPE weight = *(__global DATA_TYPE *)weights.ptr;
+#if STRIDE_X == 1
+        VEC_DATA_TYPE(DATA_TYPE, 8)
+        col0 = (VEC_DATA_TYPE(DATA_TYPE, 8))(
+                   PTR_TO_VALUE(src_addr + 0 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 1 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 2 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 3 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 4 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 5 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 6 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 7 * src_stride_y, DATA_TYPE));
+#elif STRIDE_X == 2 /* STRIDE_X == 1 */
+        VEC_DATA_TYPE(DATA_TYPE, 8)
+        col0 = (VEC_DATA_TYPE(DATA_TYPE, 8))(
+                   PTR_TO_VALUE(src_addr + 0 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 2 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 4 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 6 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 8 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 10 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 12 * src_stride_y, DATA_TYPE),
+                   PTR_TO_VALUE(src_addr + 14 * src_stride_y, DATA_TYPE));
+#else /* STRIDE_X not equal to 1 or 2 */
+#error "STRIDE_X larger than 2 is not supported"
+#endif /* STRIDE_X == 2 */
+        values = ADD_OP(values, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))weight, col0));
+
+        src_addr += src_stride_x;
+        weights.ptr += weights_stride_x;
+    }
+
+#ifdef HAS_BIAS
+    values = ADD_OP(values, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0))));
+#endif /* defined(HAS_BIAS) */
+
+    *((__global DATA_TYPE *)dst.ptr)                      = values.s0;
+    *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values.s1;
+    *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values.s2;
+    *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values.s3;
+    *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values.s4;
+    *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values.s5;
+    *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values.s6;
+    *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values.s7;
+}
+#endif // defined(DATA_LAYOUT_NHWC)
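
[Editor's note] The NHWC kernel above indexes the input differently from its NCHW counterpart: with NHWC the innermost (X) stride walks the channels, so neighbouring pixels along the image width are one Y-stride apart and image rows one Z-stride apart. That is why the kernel gathers its eight outputs with "src_addr + i * src_stride_y" and steps through channels with "src_stride_x". A minimal sketch of the two offset computations, assuming dense byte strides; the helper names are illustrative and not part of the library:

#include <stddef.h>

/* Per-dimension byte strides of a 4D tensor (illustrative). */
typedef struct
{
    size_t stride_x, stride_y, stride_z, stride_w;
} Strides;

/* NCHW: X walks the image width, Z walks the channels. */
static size_t offset_nchw(Strides s, size_t n, size_t c, size_t y, size_t x)
{
    return x * s.stride_x + y * s.stride_y + c * s.stride_z + n * s.stride_w;
}

/* NHWC: X walks the channels, so width moves along Y and height along Z,
 * matching the "i * src_stride_y" / "src_stride_z" stepping in the kernel. */
static size_t offset_nhwc(Strides s, size_t n, size_t c, size_t y, size_t x)
{
    return c * s.stride_x + x * s.stride_y + y * s.stride_z + n * s.stride_w;
}
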
 #if STRIDE_X == 3
 #define INPUT_PIXEL_STR(data_size) extract_input_stride3_##data_size
 #define INPUT_PIXEL(data_size) INPUT_PIXEL_STR(data_size)
@@ -46,7 +162,7 @@
  *
  * @param[in] input_pixel Pointer to the first pixel.
  *
- * @return extracted input pixels.
+ * @return extracted input values.
  */
 inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride1(__global const DATA_TYPE *input_pixel)
 {
@@ -57,7 +173,7 @@ inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride1(__global const DATA_TYP
  *
  * @param[in] input_pixel Pointer to the first pixel.
  *
- * @return extracted input pixels.
+ * @return extracted input values.
  */
 inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride2(__global const DATA_TYPE *input_pixel)
 {
@@ -70,7 +186,7 @@ inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride2(__global const DATA_TYP
  *
  * @param[in] input_pixel Pointer to the first pixel.
  *
- * @return extracted input pixels.
+ * @return extracted input values.
  */
 inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_32(__global const DATA_TYPE *input_pixel)
 {
@@ -89,7 +205,7 @@ inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_32(__global const DATA_
  *
  * @param[in] input_pixel Pointer to the first pixel.
  *
- * @return extracted input pixels.
+ * @return extracted input values.
  */
 inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_16(__global const DATA_TYPE *input_pixel)
 {
@@ -106,7 +222,7 @@ inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_16(__global const DATA_
  *
  * @param[in] input_pixel Pointer to the first pixel.
  *
- * @return extracted input pixels.
+ * @return extracted input values.
  */
 inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3_8(__global const DATA_TYPE *input_pixel)
 {
@@ -173,27 +289,26 @@ __kernel void direct_convolution1x1(
 #endif /* defined(HAS_BIAS) */
 
     VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
-    pixels = 0;
+    values = 0;
 
     const uint z_index = get_global_id(2);
     weights.ptr += z_index * weights_stride_w;
-
     for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
     {
         DATA_TYPE weight = *(__global DATA_TYPE *)weights.ptr;
         VEC_DATA_TYPE(DATA_TYPE, 8)
         input_pixel = INPUT_PIXEL(DATA_SIZE)((__global DATA_TYPE *)src.ptr);
-        pixels = ADD_OP(pixels, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))weight, input_pixel));
+        values = ADD_OP(values, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))weight, input_pixel));
         src.ptr += src_stride_z;
         weights.ptr += weights_stride_z;
     }
 
 #ifdef HAS_BIAS
-    pixels = ADD_OP(pixels, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, z_index))));
+    values = ADD_OP(values, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, z_index))));
 #endif /* defined(HAS_BIAS) */
 
-    vstore8(CONVERT_SAT(pixels, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr);
+    vstore8(CONVERT_SAT(values, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr);
 }
 #endif // defined(DATA_TYPE) && defined(DATA_SIZE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
@@ -314,4 +429,4 @@ __kernel void direct_convolution1x1_f32_bifrost(
     vstore4(acc2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
     vstore4(acc3, 0, (__global float *)(dst.ptr + 3 * dst_stride_y));
 }
-#endif // defined(WEIGHTS_DEPTH)
\ No newline at end of file
+#endif // defined(WEIGHTS_DEPTH)
diff --git a/src/core/CL/cl_kernels/direct_convolution3x3.cl b/src/core/CL/cl_kernels/direct_convolution3x3.cl
index 824306f2ba..08d25f6741 100644
--- a/src/core/CL/cl_kernels/direct_convolution3x3.cl
+++ b/src/core/CL/cl_kernels/direct_convolution3x3.cl
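
[Editor's note] In NCHW a filter row can be fetched with a single vload8 because width-neighbours are contiguous; in NHWC each width-neighbour is a full pixel stride apart, so the CONVOLUTION1x3_*_NHWC macros added in this file assemble their vectors one PTR_TO_VALUE load at a time. A plain-C sketch of that gather, assuming float data (illustrative only, not part of the patch):

#include <stddef.h>
#include <string.h>

/* Gather 8 width-neighbours of an NHWC tensor into a dense array.
 * base points at one element; stride_y is the byte distance between
 * consecutive pixels along the image width. */
static void gather_row8(const unsigned char *base, size_t stride_y, float out[8])
{
    for(int i = 0; i < 8; ++i)
    {
        /* Equivalent of PTR_TO_VALUE(row_ptr + i * src_stride_y, float). */
        memcpy(&out[i], base + (size_t)i * stride_y, sizeof(float));
    }
}

With STRIDE_X == 2 the macros gather 16 values plus a tail element and keep only the required lanes, which is what the .s02468ACE-style swizzles below express.
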
@@ -66,6 +66,185 @@
     acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1), (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2)); \
   })
 
+#if defined(DATA_LAYOUT_NHWC)
+
+#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
+
+#if STRIDE_X == 1
+#define CONVOLUTION1x3_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x3_STRIDE_NHWC_STRIDE1(acc, row_ptr, weights_ptr)
+#elif STRIDE_X == 2 /* STRIDE_X == 1 */
+#define CONVOLUTION1x3_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x3_STRIDE_NHWC_STRIDE2(acc, row_ptr, weights_ptr)
+#else /* STRIDE_X not equal to 1 or 2 */
+#error "STRIDE_X larger than 2 is not supported"
+#endif /* STRIDE_X == 2 */
+
+#define CONVOLUTION1x3_STRIDE_NHWC_STRIDE1(acc, row_ptr, weights_ptr) \
+    { \
+        VEC_DATA_TYPE(DATA_TYPE, 8) \
+        src0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
+                   PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE)); \
+        VEC_DATA_TYPE(DATA_TYPE, 2) \
+        src1 = (VEC_DATA_TYPE(DATA_TYPE, 2))( \
+                   PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE)); \
+        VEC_DATA_TYPE(DATA_TYPE, 3) \
+        weights = (VEC_DATA_TYPE(DATA_TYPE, 3))( \
+                      PTR_TO_VALUE((weights_ptr) + 0 * weights_stride_y, DATA_TYPE), \
+                      PTR_TO_VALUE((weights_ptr) + 1 * weights_stride_y, DATA_TYPE), \
+                      PTR_TO_VALUE((weights_ptr) + 2 * weights_stride_y, DATA_TYPE)); \
+        acc = ADD_OP(acc, MUL_OP(src0, (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s0)); \
+        acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s1)); \
+        acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s2)); \
+    }
+
+#define CONVOLUTION1x3_STRIDE_NHWC_STRIDE2(acc, row_ptr, weights_ptr) \
+    { \
+        VEC_DATA_TYPE(DATA_TYPE, 16) \
+        src0 = (VEC_DATA_TYPE(DATA_TYPE, 16))( \
+                   PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
+        DATA_TYPE src1 = PTR_TO_VALUE(row_ptr + 16 * src_stride_y, DATA_TYPE); \
+        VEC_DATA_TYPE(DATA_TYPE, 3) \
+        weights = (VEC_DATA_TYPE(DATA_TYPE, 3))( \
+                      PTR_TO_VALUE((weights_ptr) + 0 * weights_stride_y, DATA_TYPE), \
+                      PTR_TO_VALUE((weights_ptr) + 1 * weights_stride_y, DATA_TYPE), \
+                      PTR_TO_VALUE((weights_ptr) + 2 * weights_stride_y, DATA_TYPE)); \
+        \
+        acc = ADD_OP(acc, MUL_OP(src0.s02468ACE, (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s0)); \
+        acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s1)); \
+        acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s2)); \
+    }
+
+/** This kernel performs a direct convolution to convolve the low three dimensions of a tensor with data layout NHWC
+ *
+ * @note This OpenCL kernel works with stride_x = 1 and 2
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The third dimension of the weights tensor must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
+ *
+ * @param[in]  src_ptr                               Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in]  src_stride_x                          Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                            src_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  src_stride_y                          Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                            src_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                            src_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  src_offset_first_element_in_bytes     The offset of the first element in the source tensor
+ * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
+ * @param[in]  weights_ptr                           Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in]  weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
+ * @param[in]  weights_step_x                        weights_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in]  weights_step_y                        weights_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in]  weights_step_z                        weights_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in]  biases_ptr                            Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in]  biases_stride_x                       Stride of the biases tensor in X dimension (in bytes)
+ * @param[in]  biases_step_x                         biases_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  biases_offset_first_element_in_bytes  The offset of the first element in the biases tensor
+ * @param[in]  weights_stride_w                      Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution3x3_nhwc(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+    VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+    unsigned int weights_stride_w)
+{
+    Image    src     = CONVERT_TO_IMAGE_STRUCT(src);
+    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
+    Tensor3D dst     = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+    VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
+    values0 = 0;
+    const int id0 = get_global_id(0);
+    const int id1 = get_global_id(1);
+    const int id2 = get_global_id(2);
+
+    __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
+    __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + ((id2 * STRIDE_Y) - PAD_TOP) * (int)src_stride_z;
+
+    weights_addr += id0 * weights_stride_w;
+
+    const int coordy = ((id2 * STRIDE_Y) - PAD_TOP);
+    for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
+    {
+#if PAD_TOP > 0
+        if(coordy < 0) // special case: row z == -1 does not exist
+        {
+            // Skip the first row of the window and load the next two
+            CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z));
+            CONVOLUTION1x3_NHWC(values0, src_addr + 2 * (int)src_stride_z, (weights_addr + 2 * (int)weights_stride_z));
+        }
+        else if(coordy == (SRC_HEIGHT - PAD_TOP - 1))
+        {
+            // Special case for the last row of the output: the window would need the last three input rows
+            // (including one row of bottom padding), but the Z axis carries no padding at all, so the last
+            // row of the window is skipped and only the first two are accumulated.
+ CONVOLUTION1x3_NHWC(values0, src_addr, (weights_addr + 0 * (int)weights_stride_z)); + CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z)); + } + else + { + CONVOLUTION1x3_NHWC(values0, src_addr, (weights_addr + 0 * (int)weights_stride_z)); + CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z)); + CONVOLUTION1x3_NHWC(values0, src_addr + 2 * (int)src_stride_z, (weights_addr + 2 * (int)weights_stride_z)); + } +#else // PAD_TOP > 0 + CONVOLUTION1x3_NHWC(values0, src_addr, (weights_addr + 0 * (int)weights_stride_z)); + CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z)); + CONVOLUTION1x3_NHWC(values0, src_addr + 2 * (int)src_stride_z, (weights_addr + 2 * (int)weights_stride_z)); +#endif // PAD_TOP > 0 + src_addr += src_stride_x; + weights_addr += weights_stride_x; + } + +#ifdef HAS_BIAS + Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); + values0 = ADD_OP(values0, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0)))); +#endif /* defined(HAS_BIAS) */ + + *((__global DATA_TYPE *)(dst.ptr + 0 * dst_stride_y)) = values0.s0; + *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values0.s1; + *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values0.s2; + *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values0.s3; + *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values0.s4; + *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values0.s5; + *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values0.s6; + *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values0.s7; +} +#endif // defined(DATA_LAYOUT_NHWC) + /** This kernel performs a direct convolution to convolve the low three dimensions. 
* * @note This OpenCL kernel works with stride_x = 1 and 2 @@ -117,7 +296,7 @@ __kernel void direct_convolution3x3( Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst); VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8) - pixels0 = 0; + values0 = 0; __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0); __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0); @@ -127,9 +306,9 @@ __kernel void direct_convolution3x3( for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d) { - CONVOLUTION1x3(pixels0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y)); - CONVOLUTION1x3(pixels0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y)); - CONVOLUTION1x3(pixels0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y)); + CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_y)); + CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y)); + CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y)); src_addr += src_stride_z; weights_addr += weights_stride_z; @@ -138,10 +317,10 @@ __kernel void direct_convolution3x3( #ifdef HAS_BIAS Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); - pixels0 = ADD_OP(pixels0, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, kernel_index)))); + values0 = ADD_OP(values0, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, kernel_index)))); #endif /* defined(HAS_BIAS) */ - vstore8(CONVERT_SAT(pixels0, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr); + vstore8(CONVERT_SAT(values0, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr); } #endif //defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) @@ -214,9 +393,9 @@ __kernel void direct_convolution3x3_f32_bifrost( Image src = CONVERT_TO_IMAGE_STRUCT(src); Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst); - float4 pixels0 = 0; - float4 pixels1 = 0; - float4 pixels2 = 0; + float4 values0 = 0; + float4 values1 = 0; + float4 values2 = 0; __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w); __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0); @@ -236,39 +415,39 @@ __kernel void direct_convolution3x3_f32_bifrost( src0 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y)); src1 = vload2(0, (__global float *)(src_addr + 0 * src_stride_y) + 4); - CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row0); + CONVOLUTION1x3_BIFROST(values0, src0, src1, weights_row0); // Load values from row1 of input tensor src0 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y)); src1 = vload2(0, (__global float *)(src_addr + 1 * src_stride_y) + 4); // Accumulate - CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row1); - CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row0); + CONVOLUTION1x3_BIFROST(values0, src0, src1, weights_row1); + CONVOLUTION1x3_BIFROST(values1, src0, src1, weights_row0); // Load values from row2 of input tensor src0 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y)); src1 = vload2(0, (__global float *)(src_addr + 2 * src_stride_y) + 4); // Accumulate - 
CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row2); - CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row1); - CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row0); + CONVOLUTION1x3_BIFROST(values0, src0, src1, weights_row2); + CONVOLUTION1x3_BIFROST(values1, src0, src1, weights_row1); + CONVOLUTION1x3_BIFROST(values2, src0, src1, weights_row0); // Load values from row3 of input tensor src0 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y)); src1 = vload2(0, (__global float *)(src_addr + 3 * src_stride_y) + 4); // Accumulate - CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row2); - CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row1); + CONVOLUTION1x3_BIFROST(values1, src0, src1, weights_row2); + CONVOLUTION1x3_BIFROST(values2, src0, src1, weights_row1); // Row4 src0 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); src1 = vload2(0, (__global float *)(src_addr + 4 * src_stride_y) + 4); // Accumulate - CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row2); + CONVOLUTION1x3_BIFROST(values2, src0, src1, weights_row2); src_addr += src_stride_z; weights_addr += weights_stride_z; @@ -279,13 +458,13 @@ __kernel void direct_convolution3x3_f32_bifrost( float bias = (float) * ((__global float *)(vector_offset(&biases, kernel_index))); - pixels0 += (float4)bias; - pixels1 += (float4)bias; - pixels2 += (float4)bias; + values0 += (float4)bias; + values1 += (float4)bias; + values2 += (float4)bias; #endif /* defined(HAS_BIAS) */ - vstore4(pixels0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y)); - vstore4(pixels1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y)); - vstore4(pixels2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y)); + vstore4(values0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y)); + vstore4(values1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y)); + vstore4(values2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y)); } #endif // defined(WEIGHTS_DEPTH) diff --git a/src/core/CL/cl_kernels/direct_convolution5x5.cl b/src/core/CL/cl_kernels/direct_convolution5x5.cl index e678f6f51b..70be058854 100644 --- a/src/core/CL/cl_kernels/direct_convolution5x5.cl +++ b/src/core/CL/cl_kernels/direct_convolution5x5.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017 ARM Limited. + * Copyright (c) 2016-2018 ARM Limited. 
 *
 * SPDX-License-Identifier: MIT
 *
@@ -69,6 +69,190 @@
     acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468a, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
   })
 
+#if defined(DATA_LAYOUT_NHWC)
+
+#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
+
+#if STRIDE_X == 1
+#define CONVOLUTION1x5_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x5_STRIDE1_NHWC(acc, row_ptr, weights_ptr)
+#elif STRIDE_X == 2 /* STRIDE_X == 1 */
+#define CONVOLUTION1x5_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x5_STRIDE2_NHWC(acc, row_ptr, weights_ptr)
+#else /* STRIDE_X not equal to 1 or 2 */
+#error "STRIDE_X larger than 2 is not supported"
+#endif /* STRIDE_X == 2 */
+
+#define CONVOLUTION1x5_STRIDE1_NHWC(acc, row_ptr, weights_ptr) \
+    ({ \
+        VEC_DATA_TYPE(DATA_TYPE, 8) \
+        src0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
+                   PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE)); \
+        VEC_DATA_TYPE(DATA_TYPE, 4) \
+        src1 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
+                   PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE)); \
+        VEC_DATA_TYPE(DATA_TYPE, 4) \
+        weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
+                              PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
+                              PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE)); \
+        DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE); \
+        acc += src0 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s345, src0.s67, src1.s012) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s45, src0.s67, src1.s0123) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
+    })
+
+#define CONVOLUTION1x5_STRIDE2_NHWC(acc, row_ptr, weights_ptr) \
+    ({ \
+        VEC_DATA_TYPE(DATA_TYPE, 16) \
+        src0 = (VEC_DATA_TYPE(DATA_TYPE, 16))( \
+                   PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
+        VEC_DATA_TYPE(DATA_TYPE, 4) \
+        src1 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
+                   PTR_TO_VALUE(row_ptr + 16 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 17 * src_stride_y, DATA_TYPE), \
+                   PTR_TO_VALUE(row_ptr + 18 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 19 * src_stride_y, DATA_TYPE)); \
+        VEC_DATA_TYPE(DATA_TYPE, 4) \
+        weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
+                              PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
+                              PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE)); \
+        DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE); \
+        acc += src0.s02468ACE * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
+        \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s3579, src0.sBDF, src1.s1) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
+        acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468a, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
+    })
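
[Editor's note] The 3x3 kernel earlier and the 5x5 kernel below both clamp the vertical filter window instead of relying on padded rows, because in NHWC the image rows live along the Z axis, which carries no padding. The three branches in those kernels unroll one decision: drop whichever taps fall outside the image. A generalized sketch in plain C (illustrative; the kernels themselves only ever drop the first or the last tap, since PAD_TOP is at most one row for these filter sizes):

#include <stdio.h>

/* For the output row `out_y`, accumulate only the taps of a `taps`-row
 * vertical window that fall inside [0, src_height). Mirrors the PAD_TOP
 * special cases in the NHWC kernels. */
static void accumulate_valid_rows(int out_y, int stride_y, int pad_top, int taps, int src_height)
{
    const int first = out_y * stride_y - pad_top; /* "coordy" in the kernels */
    for(int tap = 0; tap < taps; ++tap)
    {
        const int y = first + tap;
        if(y < 0 || y >= src_height)
        {
            continue; /* missing row: skip this tap, as the kernels skip tap 0 (top) or the last tap (bottom) */
        }
        printf("accumulate input row %d against weights row %d\n", y, tap);
    }
}
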
+
+/** This kernel performs a direct convolution to convolve the low three dimensions in a tensor with the NHWC data layout
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The third dimension of the weights tensor must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
+ *
+ * @param[in]  src_ptr                               Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in]  src_stride_x                          Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                            src_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  src_stride_y                          Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                            src_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                            src_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  src_offset_first_element_in_bytes     The offset of the first element in the source tensor
+ * @param[out] dst_ptr                               Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                            dst_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                            dst_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                            dst_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
+ * @param[in]  weights_ptr                           Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in]  weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
+ * @param[in]  weights_step_x                        weights_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in]  weights_step_y                        weights_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in]  weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in]  weights_step_z                        weights_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in]  weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in]  biases_ptr                            Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in]  biases_stride_x                       Stride of the biases tensor in X dimension (in bytes)
+ * @param[in]  biases_step_x                         biases_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in]  biases_offset_first_element_in_bytes  The offset of the first element in the biases tensor
+ * @param[in]  weights_stride_w                      Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution5x5_nhwc(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+    VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+    unsigned int weights_stride_w)
+{
+    Image    src     = CONVERT_TO_IMAGE_STRUCT(src);
+    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
+    Tensor3D dst     = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+    VEC_DATA_TYPE(DATA_TYPE, 8)
+    values0 = 0;
+
+    const int id0 = get_global_id(0);
+    const int id1 = get_global_id(1);
+    const int id2 = get_global_id(2);
+
+    __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
+    __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + ((id2 * STRIDE_Y) - PAD_TOP) * (int)src_stride_z;
+
+    weights_addr += id0 * weights_stride_w;
+    const int coordy = id2 - PAD_TOP;
+
+    for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
+    {
+#if PAD_TOP > 0
+        if(coordy < 0) // special case: row z == -1 does not exist
+        {
+            // Skip the first row of the window and load the next four
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
+        }
+        else if(coordy == (DST_HEIGHT - PAD_TOP - 1))
+        {
+            // Special case for the last row of the output: the window would need the last five input rows
+            // (including one row of bottom padding), but the Z axis carries no padding at all, so the last
+            // row of the window is skipped and only the first four are accumulated.
+            CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
+        }
+        else
+        {
+            CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
+            CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
+        }
+#else // PAD_TOP > 0
+        CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
+        CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
+        CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
+        CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
+        CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
+#endif // PAD_TOP > 0
+
+        src_addr += src_stride_x;
+        weights_addr += weights_stride_x;
+    }
+
+#ifdef HAS_BIAS
+    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+    values0 += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0)));
+#endif /* defined(HAS_BIAS) */
+
+    *((__global DATA_TYPE *)(dst.ptr + 0 * dst_stride_y)) = values0.s0;
+    *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values0.s1;
+    *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values0.s2;
+    *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values0.s3;
+    *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values0.s4;
+    *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values0.s5;
+    *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values0.s6;
+    *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values0.s7;
+}
+
+#endif // defined(DATA_LAYOUT_NHWC)
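
[Editor's note] All three NHWC kernels write 8 output pixels along the width per iteration, and the host-side setup_num_elems() added later in this patch sizes the access windows accordingly: 8/10/12 input elements for stride 1 and 16/17/20 for stride 2. For stride 1 and the 3x3 stride-2 case these match the usual window formula; the 1x1 and 5x5 stride-2 cases read one element more than the formula gives (16 instead of 15, 20 instead of 19), which appears to round the count up to an even number. A small illustrative check, not part of the patch:

#include <assert.h>

/* Input pixels touched along the width when producing `written` outputs
 * with a `taps`-wide filter at horizontal stride `stride`. */
static unsigned elems_read(unsigned written, unsigned taps, unsigned stride)
{
    return (written - 1u) * stride + taps;
}

static void check_against_setup_num_elems(void)
{
    assert(elems_read(8, 1, 1) == 8);  /* 1x1, stride 1 */
    assert(elems_read(8, 3, 1) == 10); /* 3x3, stride 1 */
    assert(elems_read(8, 5, 1) == 12); /* 5x5, stride 1 */
    assert(elems_read(8, 3, 2) == 17); /* 3x3, stride 2 */
    /* 1x1/5x5 at stride 2: the patch uses 16 and 20, one above the formula. */
}
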
 /** This kernel performs a direct convolution to convolve the low three dimensions.
  *
  * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
@@ -119,7 +303,7 @@ __kernel void direct_convolution5x5(
     Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
 
     VEC_DATA_TYPE(DATA_TYPE, 8)
-    pixels0 = 0;
+    values0 = 0;
 
     __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
     __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0);
@@ -129,11 +313,11 @@
     for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
     {
-        CONVOLUTION1x5(pixels0, (__global DATA_TYPE *)src_addr, (__global DATA_TYPE *)weights_addr);
-        CONVOLUTION1x5(pixels0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
-        CONVOLUTION1x5(pixels0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
-        CONVOLUTION1x5(pixels0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
-        CONVOLUTION1x5(pixels0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
+        CONVOLUTION1x5(values0, (__global DATA_TYPE *)src_addr, (__global DATA_TYPE *)weights_addr);
+        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_y));
+        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_y));
+        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_y));
+        CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_y), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_y));
 
         src_addr += src_stride_z;
         weights_addr += weights_stride_z;
@@ -142,10 +326,10 @@
 #ifdef HAS_BIAS
     Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
 
-    pixels0 += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, kernel_index)));
+    values0 += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, kernel_index)));
 #endif /* defined(HAS_BIAS) */
 
-    vstore8(pixels0, 0, (__global DATA_TYPE *)dst.ptr);
+    vstore8(values0, 0, (__global DATA_TYPE *)dst.ptr);
 }
 #endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
@@ -226,8 +410,8 @@ __kernel void direct_convolution5x5_f32_bifrost(
     Image src = CONVERT_TO_IMAGE_STRUCT(src);
     Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
 
-    float4 pixels0 = 0.0f;
-    float4 pixels1 = 0.0f;
+    float4 values0 = 0.0f;
+    float4 values1 = 0.0f;
 
     __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
     __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0);
@@ -247,14 +431,14 @@
     src0 = vload8(0, (__global float *)(src_addr + 0 * src_stride_y));
 
     // Accumulate
-    CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row00, weights_row01);
+    CONVOLUTION1x5_BIFROST(values0, src0, weights_row00, weights_row01);
 
     // Load values from row1 of input tensor
     src0 = vload8(0, (__global float *)(src_addr + 1 * src_stride_y));
 
     // Accumulate
-    CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row10, weights_row11);
-    CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row00, weights_row01);
+    CONVOLUTION1x5_BIFROST(values0, src0, weights_row10, weights_row11);
+    CONVOLUTION1x5_BIFROST(values1, src0, weights_row00, weights_row01);
 
     //
Load values from row2 of input tensor src0 = vload8(0, (__global float *)(src_addr + 2 * src_stride_y)); @@ -264,8 +448,8 @@ __kernel void direct_convolution5x5_f32_bifrost( weights_row01 = *((__global float *)(weights_addr + 2 * weights_stride_y) + 4); // Accumulate - CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row00, weights_row01); - CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row10, weights_row11); + CONVOLUTION1x5_BIFROST(values0, src0, weights_row00, weights_row01); + CONVOLUTION1x5_BIFROST(values1, src0, weights_row10, weights_row11); // Load values from row3 of input tensor src0 = vload8(0, (__global float *)(src_addr + 3 * src_stride_y)); @@ -275,8 +459,8 @@ __kernel void direct_convolution5x5_f32_bifrost( weights_row11 = *((__global float *)(weights_addr + 3 * weights_stride_y) + 4); // Accumulate - CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row10, weights_row11); - CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row00, weights_row01); + CONVOLUTION1x5_BIFROST(values0, src0, weights_row10, weights_row11); + CONVOLUTION1x5_BIFROST(values1, src0, weights_row00, weights_row01); // Load values from row4 of input tensor src0 = vload8(0, (__global float *)(src_addr + 4 * src_stride_y)); @@ -285,14 +469,14 @@ __kernel void direct_convolution5x5_f32_bifrost( weights_row00 = vload4(0, (__global float *)(weights_addr + 4 * weights_stride_y)); weights_row01 = *((__global float *)(weights_addr + 4 * weights_stride_y) + 4); - CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row00, weights_row01); - CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row10, weights_row11); + CONVOLUTION1x5_BIFROST(values0, src0, weights_row00, weights_row01); + CONVOLUTION1x5_BIFROST(values1, src0, weights_row10, weights_row11); // Load values from row5 of input tensor src0 = vload8(0, (__global float *)(src_addr + 5 * src_stride_y)); // Accumulate - CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row00, weights_row01); + CONVOLUTION1x5_BIFROST(values1, src0, weights_row00, weights_row01); src_addr += src_stride_z; weights_addr += weights_stride_z; @@ -303,11 +487,11 @@ __kernel void direct_convolution5x5_f32_bifrost( float4 bias = (float4) * ((__global float *)(vector_offset(&biases, kernel_index))); - pixels0 += bias; - pixels1 += bias; + values0 += bias; + values1 += bias; #endif /* defined(HAS_BIAS) */ - vstore4(pixels0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y)); - vstore4(pixels1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y)); + vstore4(values0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y)); + vstore4(values1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y)); } #endif // defined(WEIGHTS_DEPTH) diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp index 7f7437d6ef..754f0d8f23 100644 --- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp +++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp @@ -47,19 +47,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) != weights->dimension(1), - "Weights should have same width as length"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) != 1 && weights->dimension(0) != 3 && weights->dimension(0) != 5, + + const DataLayout data_layout = 
input->data_layout(); + const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); + const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5, "Kernel sizes other than 1x1, 3x3 or 5x5 are not supported"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(2) != input->dimension(2), + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != input->dimension(channel_idx), "Weights feature map dimension should match the respective input's one"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(0) != weights->dimension(1), - "Only rectangular weights are supported!"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, - "Weights can be at most 4 dimensional"); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(0) == 1) && std::get<0>(conv_info.stride()) > 3, - "Strides larger than 3 not supported for 1x1 convolution."); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(0) == 3 || weights->dimension(0) == 5) && std::get<0>(conv_info.stride()) > 2, + ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional"); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 1) && std::get<0>(conv_info.stride()) > 3, "Strides larger than 3 not supported for 1x1 convolution."); + ARM_COMPUTE_RETURN_ERROR_ON_MSG((weights->dimension(width_idx) == 3 || weights->dimension(width_idx) == 5) && std::get<0>(conv_info.stride()) > 2, "Strides larger than 2 not supported for 3x3 convolution."); if(biases != nullptr) @@ -89,36 +90,27 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, return Status{}; } -std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target) +inline bool can_run_optimized_kernel_for_bifrost(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size, + DataType data_type, DataLayout data_layout) { - const unsigned int kernel_size = weights->dimension(0); - const DataType data_type = input->data_type(); - - // Get convolved dimensions - TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info); - - // Output auto inizialitation if not yet initialized - // FIXME: input->clone()->set_tensor_shape(output_shape) doesn't work with subtensors for grouped direct convolutions (AlexNet). 
- auto_init_if_empty(*output, output_shape, - 1, - input->data_type(), - input->quantization_info()); + return gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76) && (kernel_size <= 5) + && (conv_stride_x == 1) && (conv_stride_y == 1) && (data_type == DataType::F32) && (data_layout == DataLayout::NCHW); +} - unsigned int conv_stride_x = std::get<0>(conv_info.stride()); - unsigned int conv_stride_y = std::get<1>(conv_info.stride()); - unsigned int conv_pad_left = conv_info.pad_left(); - unsigned int conv_pad_top = conv_info.pad_top(); +inline void setup_num_elems(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y, + unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y, + unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *input) +{ + const DataType data_type = input->data_type(); + const DataLayout data_layout = input->data_layout(); + unsigned int conv_stride_x = std::get<0>(conv_info.stride()); + unsigned int conv_stride_y = std::get<1>(conv_info.stride()); - unsigned int num_elems_read_per_iteration_x = 0; - unsigned int num_elems_read_per_iteration_y = 0; - unsigned int num_elems_written_per_iteration_x = 0; - unsigned int num_elems_written_per_iteration_y = 0; + const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout); - if(gpu_target_is_in(target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76) && (kernel_size <= 5) && (conv_stride_x == 1) - && (conv_stride_y == 1) && (data_type == DataType::F32)) + if(run_optimized_bifrost) { // Configure kernel window - switch(kernel_size) { case 1: @@ -218,22 +210,124 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen } } - // Create window and update padding - bool window_changed = false; - Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y)); + if(data_layout == DataLayout::NHWC) + { + num_elems_written_per_iteration_x = 1; + num_elems_read_per_iteration_x = 1; + switch(kernel_size) + { + case 1: + switch(conv_stride_x) + { + case 1: + num_elems_read_per_iteration_y = 8; + num_elems_written_per_iteration_y = 8; + break; + case 2: + num_elems_read_per_iteration_y = 16; + num_elems_written_per_iteration_y = 8; + break; + default: + ARM_COMPUTE_ERROR("Invalid convolution stride X"); + } + break; + case 3: + switch(conv_stride_x) + { + case 1: + num_elems_read_per_iteration_y = 10; + num_elems_written_per_iteration_y = 8; + break; + case 2: + num_elems_read_per_iteration_y = 17; + num_elems_written_per_iteration_y = 8; + break; + default: + ARM_COMPUTE_ERROR("Invalid convolution stride X"); + } + break; + case 5: + switch(conv_stride_x) + { + case 1: + num_elems_read_per_iteration_y = 12; + num_elems_written_per_iteration_y = 8; + break; + case 2: + num_elems_read_per_iteration_y = 20; + num_elems_written_per_iteration_y = 8; + break; + default: + ARM_COMPUTE_ERROR("Invalid convolution stride X"); + } + break; + default: + ARM_COMPUTE_ERROR("Not implemented."); + break; + } + } +} + +std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target) +{ + const DataLayout data_layout = input->data_layout(); + const int width_idx = 
get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const unsigned int kernel_size = weights->dimension(width_idx);
+
+    // Get convolved dimensions
+    TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info);
 
-    AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top,
-                                       num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
-                                       conv_stride_x, conv_stride_y);
-    AccessWindowStatic    weights_access(weights, 0, 0, kernel_size, kernel_size);
-    AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
+    // Output auto initialization if not yet initialized
+    // FIXME: input->clone()->set_tensor_shape(output_shape) doesn't work with subtensors for grouped direct convolutions (AlexNet).
+    auto_init_if_empty(*output, output_shape,
+                       1,
+                       input->data_type(),
+                       input->quantization_info());
 
-    window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
+    unsigned int num_elems_read_per_iteration_x    = 0;
+    unsigned int num_elems_read_per_iteration_y    = 0;
+    unsigned int num_elems_written_per_iteration_x = 0;
+    unsigned int num_elems_written_per_iteration_y = 0;
+
+    unsigned int conv_pad_left = conv_info.pad_left();
+    unsigned int conv_pad_top  = conv_info.pad_top();
+    unsigned int conv_stride_x = std::get<0>(conv_info.stride());
+    unsigned int conv_stride_y = std::get<1>(conv_info.stride());
 
-    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+    setup_num_elems(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
+                    num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
+                    kernel_size, conv_info, target, input);
 
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
+    // Create window and update padding
+    bool   window_changed = false;
+    Window win            = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
+
+    if(data_layout == DataLayout::NHWC)
+    {
+        AccessWindowStatic input_access(input, 0, -conv_pad_left,
+                                        num_elems_read_per_iteration_x,
+                                        ceil_to_multiple(input->dimension(1) + conv_info.pad_right(), num_elems_read_per_iteration_y));
+        AccessWindowStatic    weights_access(weights, 0, 0, weights->dimension(0), weights->dimension(1));
+        AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
+        window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
+        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+        Status err = (window_changed) ?
+    }
+    else if(data_layout == DataLayout::NCHW)
+    {
+        AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
+        AccessWindowStatic    weights_access(weights, 0, 0, kernel_size, kernel_size);
+        AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
+        window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
+        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+        Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+        return std::make_pair(err, win);
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Not supported");
+    }
 }
 } // namespace
 
@@ -251,7 +345,12 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
 
-    const unsigned int kernel_size = weights->info()->dimension(0);
+    const DataLayout data_layout = input->info()->data_layout();
+    const int        width_idx   = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+    const int        height_idx  = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+    const int        channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+
+    const unsigned int kernel_size = weights->info()->dimension(width_idx);
     const DataType     data_type   = input->info()->data_type();
 
     // Get convolved dimensions
@@ -274,7 +373,19 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
     _conv_stride_x = std::get<0>(conv_info.stride());
     _conv_stride_y = std::get<1>(conv_info.stride());
-    _border_size   = BorderSize(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
+
+    if(data_layout == DataLayout::NHWC)
+    {
+        _border_size = BorderSize(conv_info.pad_left(), 0, conv_info.pad_right(), 0);
+    }
+    else if(data_layout == DataLayout::NCHW)
+    {
+        _border_size = BorderSize(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Not supported");
+    }
 
     _input   = input;
     _weights = weights;
@@ -285,14 +396,19 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
     std::stringstream kernel_name;
     kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
+    if(data_layout == DataLayout::NHWC)
+    {
+        kernel_name << "_" << lower_string(string_from_data_layout(data_layout));
+    }
 
     CLBuildOptions build_options;
     build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
 
-    if(gpu_target_is_in(gpu_target, GPUTarget::G71, GPUTarget::G72, GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT, GPUTarget::G76) && (kernel_size <= 5) && (_conv_stride_x == 1)
-       && (_conv_stride_y == 1) && (data_type == DataType::F32))
+    const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, data_layout);
+
+    if(run_optimized_for_bifrost)
     {
-        build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(2))));
+        build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
kernel_name << "_f32_bifrost"; _kernel = static_cast(CLKernelLibrary::get().create_kernel(kernel_name.str(), build_options.options())); @@ -304,10 +420,20 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL build_options.add_option_if(is_quantized_asymm, std::string("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size))); build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type))); build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type))); - build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(2)))); + build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx)))); build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x))); + if(data_layout == DataLayout::NHWC) + { + build_options.add_option(std::string("-DDATA_LAYOUT_NHWC=1")); + build_options.add_option(std::string("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(height_idx)))); + build_options.add_option(std::string("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(width_idx)))); + build_options.add_option(std::string("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(height_idx)))); + build_options.add_option(std::string("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(width_idx)))); + build_options.add_option(std::string("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()))); + build_options.add_option(std::string("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()))); + build_options.add_option(std::string("-DSTRIDE_Y=" + support::cpp11::to_string(_conv_stride_y))); + } build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type))); - // Create kernel _kernel = static_cast(CLKernelLibrary::get().create_kernel(is_quantized_asymm ? 
"direct_convolution_1x1_3x3_5x5_quantized" : kernel_name.str(), build_options.options())); @@ -353,9 +479,11 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL _config_id += "_"; _config_id += support::cpp11::to_string(_conv_stride_y); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(0)); + _config_id += support::cpp11::to_string(output->info()->dimension(width_idx)); _config_id += "_"; - _config_id += support::cpp11::to_string(output->info()->dimension(1)); + _config_id += support::cpp11::to_string(output->info()->dimension(height_idx)); + _config_id += "_"; + _config_id += lower_string(string_from_data_layout(data_layout)); } Status CLDirectConvolutionLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, @@ -378,12 +506,16 @@ void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue win_in.adjust(Window::DimX, -_border_size.left, true); win_in.adjust(Window::DimY, -_border_size.top, true); - win_in.set_dimension_step(Window::DimX, window.x().step() * _conv_stride_x); - win_in.set_dimension_step(Window::DimY, window.y().step() * _conv_stride_y); - Window slice_in = win_in.first_slice_window_3D(); + const DataLayout data_layout = _input->info()->data_layout(); + const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); + const int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + + win_in.set_dimension_step(width_idx, window[width_idx].step() * _conv_stride_x); + win_in.set_dimension_step(height_idx, window[height_idx].step() * _conv_stride_y); - unsigned int idx1 = 2 * num_arguments_per_3D_tensor(); + Window slice_in = win_in.first_slice_window_3D(); + unsigned int idx1 = 2 * num_arguments_per_3D_tensor(); add_3D_tensor_argument(idx1, _weights, slice); if(_biases != nullptr) @@ -400,7 +532,6 @@ void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue unsigned int idx = 0; add_3D_tensor_argument(idx, _input, slice_in); add_3D_tensor_argument(idx, _output, slice); - enqueue(queue, *this, slice, _lws_hint); } while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in)); -- cgit v1.2.1