From 1246b63ca04cb067f26ae860688647224d6ba24e Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Wed, 16 Aug 2017 18:38:32 +0100
Subject: COMPMID-477 - Optimized Direct Convolution 3x3 and 5x5 (f32) for
 Bifrost.

Each work-item computes 4x3 output elements in the case of the 3x3
convolution and 4x2 in the case of the 5x5 convolution.

Change-Id: I6ebbaff8b7e971c1f90d5845c0b58d2a40f39df5
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/84345
Reviewed-by: Anthony Barbier
Tested-by: Kaizen
---
 src/core/CL/CLKernelLibrary.cpp                    |   2 +
 src/core/CL/cl_kernels/direct_convolution3x3.cl    | 155 ++++++++++++++++++-
 src/core/CL/cl_kernels/direct_convolution5x5.cl    | 168 ++++++++++++++++++++-
 .../CL/kernels/CLDirectConvolutionLayerKernel.cpp  | 139 ++++++++++++-----
 4 files changed, 419 insertions(+), 45 deletions(-)

(limited to 'src/core/CL')

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 1647a37ce0..cda2c5afe1 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -147,7 +147,9 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "dilate", "dilate.cl" },
     { "direct_convolution1x1", "direct_convolution1x1.cl" },
     { "direct_convolution3x3", "direct_convolution3x3.cl" },
+    { "direct_convolution3x3_f32_bifrost", "direct_convolution3x3.cl" },
     { "direct_convolution5x5", "direct_convolution5x5.cl" },
+    { "direct_convolution5x5_f32_bifrost", "direct_convolution5x5.cl" },
     { "erode", "erode.cl" },
     { "fast_corners", "fast_corners.cl" },
     { "fill_image_borders_constant", "fill_border.cl" },
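The two entries above route the new Bifrost kernel names to the existing OpenCL source files. A minimal sketch of how a caller might build one of them follows; create_kernel(), CLKernelLibrary and the -DWEIGHTS_DEPTH option string all appear in this patch, while the wrapper function itself is hypothetical:

// Hypothetical helper (not part of the patch): build the 3x3 Bifrost kernel
// through the library's name -> file map shown above.
#include <set>
#include <string>

cl::Kernel build_direct_convolution3x3_f32_bifrost(unsigned int weights_depth)
{
    std::set<std::string> options;
    options.emplace("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(weights_depth));
    // "direct_convolution3x3_f32_bifrost" resolves to direct_convolution3x3.cl
    // via _kernel_program_map above.
    return static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("direct_convolution3x3_f32_bifrost", options));
}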
diff --git a/src/core/CL/cl_kernels/direct_convolution3x3.cl b/src/core/CL/cl_kernels/direct_convolution3x3.cl
index 28da544f89..26f24e187b 100644
--- a/src/core/CL/cl_kernels/direct_convolution3x3.cl
+++ b/src/core/CL/cl_kernels/direct_convolution3x3.cl
@@ -40,6 +40,8 @@ MULQ_SAT_IMPL(qs32x8, qs32x8)
 
 #endif /* FIXED_POINT_POSITION */
 
+#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+
 #if STRIDE_X == 1
 #define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr)
 #elif STRIDE_X == 2 /* STRIDE_X == 1 */
@@ -77,11 +79,12 @@ MULQ_SAT_IMPL(qs32x8, qs32x8)
 
 /** This kernel performs a direct convolution to convolve the low three dimensions.
  *
+ * @note This OpenCL kernel works with stride_x = 1 and 2
  * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
  * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
  *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
 * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -111,7 +114,6 @@ MULQ_SAT_IMPL(qs32x8, qs32x8)
 * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
 * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
 */
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
 __kernel void direct_convolution3x3(
     TENSOR3D_DECLARATION(src),
     TENSOR3D_DECLARATION(dst),
@@ -152,4 +154,149 @@ __kernel void direct_convolution3x3(
     vstore8(CONVERT_SAT(pixels0, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr);
 }
-#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+#endif //defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+
+#if defined(WEIGHTS_DEPTH)
+
+#define CONVOLUTION1x3_BIFROST(acc, src0, src1, weights_row0) \
+    ({                                                        \
+        acc.s0 = mad(src0.s0, weights_row0.s0, acc.s0);       \
+        acc.s1 = mad(src0.s1, weights_row0.s0, acc.s1);       \
+        acc.s2 = mad(src0.s2, weights_row0.s0, acc.s2);       \
+        acc.s3 = mad(src0.s3, weights_row0.s0, acc.s3);       \
+        acc.s0 = mad(src0.s1, weights_row0.s1, acc.s0);       \
+        acc.s1 = mad(src0.s2, weights_row0.s1, acc.s1);       \
+        acc.s2 = mad(src0.s3, weights_row0.s1, acc.s2);       \
+        acc.s3 = mad(src1.s0, weights_row0.s1, acc.s3);       \
+        acc.s0 = mad(src0.s2, weights_row0.s2, acc.s0);       \
+        acc.s1 = mad(src0.s3, weights_row0.s2, acc.s1);       \
+        acc.s2 = mad(src1.s0, weights_row0.s2, acc.s2);       \
+        acc.s3 = mad(src1.s1, weights_row0.s2, acc.s3);       \
+    })
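The macro above produces four horizontally adjacent outputs from one 3-tap weight row, reusing six input values (a float4 plus the first two lanes of the following float2). A scalar model of the same 12 mad() operations, as illustrative C++ rather than library code:

#include <array>

std::array<float, 4> conv1x3_tile(const std::array<float, 6> &src,
                                  const std::array<float, 3> &w,
                                  std::array<float, 4> acc)
{
    for(int i = 0; i < 4; ++i)     // acc.s0 .. acc.s3
    {
        for(int k = 0; k < 3; ++k) // the three taps of one weight row
        {
            acc[i] = src[i + k] * w[k] + acc[i]; // mirrors mad(src, w, acc)
        }
    }
    return acc;
}

Ordering the multiplies by weight tap rather than by output lane keeps each weight scalar live across four consecutive mads, which is the reuse pattern the macro is built around.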
+
+/** An optimized direct convolution 3x3 OpenCL kernel for Bifrost architectures when the data type is F32
+ *
+ * @note This OpenCL kernel works only with stride_x and stride_y equal to 1
+ * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
+ * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution3x3_f32_bifrost(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+    VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+    unsigned int weights_stride_w)
+{
+    // Get the kernel index
+    const int kernel_index = get_global_id(2);
+
+    Image    src = CONVERT_TO_IMAGE_STRUCT(src);
+    Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+    float4 pixels0 = 0;
+    float4 pixels1 = 0;
+    float4 pixels2 = 0;
+
+    __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
+    __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0);
+
+    // Note: Since each work-item computes 4x3 elements, we need to load 5 rows from the input tensor
+
+    for(ushort d = 0; d < (ushort)WEIGHTS_DEPTH; ++d)
+    {
+        // Load the weights
+        float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
+        float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
+        float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
+        float4 src0;
+        float2 src1;
+
+        // Load values from row0 of input tensor
+        src0 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y));
+        src1 = vload2(0, (__global float *)(src_addr + 0 * src_stride_y) + 4);
+
+        CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row0);
+
+        // Load values from row1 of input tensor
+        src0 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y));
+        src1 = vload2(0, (__global float *)(src_addr + 1 * src_stride_y) + 4);
+
+        // Accumulate
+        CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row1);
+        CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row0);
+
+        // Load values from row2 of input tensor
+        src0 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y));
+        src1 = vload2(0, (__global float *)(src_addr + 2 * src_stride_y) + 4);
+
+        // Accumulate
+        CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row2);
+        CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row1);
+        CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row0);
+
+        // Load values from row3 of input tensor
+        src0 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y));
+        src1 = vload2(0, (__global float *)(src_addr + 3 * src_stride_y) + 4);
+
+        // Accumulate
+        CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row2);
+        CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row1);
+
+        // Load values from row4 of input tensor
+        src0 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y));
+        src1 = vload2(0, (__global float *)(src_addr + 4 * src_stride_y) + 4);
+
+        // Accumulate
+        CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row2);
+
+        src_addr += src_stride_z;
+        weights_addr += weights_stride_z;
+    }
+
+#ifdef HAS_BIAS
+    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+
+    float4 bias = (float4) * ((__global float *)(vector_offset(&biases, kernel_index)));
+
+    pixels0 += bias;
+    pixels1 += bias;
+    pixels2 += bias;
+#endif /* defined(HAS_BIAS) */
+
+    vstore4(pixels0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
+    vstore4(pixels1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
+    vstore4(pixels2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
+}
+#endif // defined(WEIGHTS_DEPTH)
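The load pattern above follows directly from the tile shape: at stride 1, an out_w x out_h output tile convolved with a k x k kernel touches (out_w + k - 1) x (out_h + k - 1) input elements. A small self-contained check of that arithmetic (plain C++, not library code):

#include <cstdio>

int main()
{
    // Output tiles used by the two Bifrost kernels in this patch
    const struct { int k, out_w, out_h; } tiles[] = { { 3, 4, 3 }, { 5, 4, 2 } };
    for(const auto &t : tiles)
    {
        // Stride-1 input footprint of one work-item
        std::printf("%dx%d kernel, %dx%d tile: reads %dx%d inputs\n",
                    t.k, t.k, t.out_w, t.out_h, t.out_w + t.k - 1, t.out_h + t.k - 1);
    }
    return 0;
}

The 6x5 and 8x6 results are exactly the five rows loaded as vload4 + vload2 above, the six vload8 rows in the 5x5 kernel below, and the num_elems_read_per_iteration values chosen later in CLDirectConvolutionLayerKernel::configure().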
diff --git a/src/core/CL/cl_kernels/direct_convolution5x5.cl b/src/core/CL/cl_kernels/direct_convolution5x5.cl
index d8c0d891d7..496da97a09 100644
--- a/src/core/CL/cl_kernels/direct_convolution5x5.cl
+++ b/src/core/CL/cl_kernels/direct_convolution5x5.cl
@@ -25,6 +25,8 @@
 
 #undef CONVERT_SAT
 
+#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+
 #if STRIDE_X == 1
 #define CONVOLUTION1x5(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr)
 #elif STRIDE_X == 2 /* STRIDE_X == 1 */
@@ -71,7 +73,7 @@
 *
 * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
 * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
 *
 * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -103,7 +105,6 @@
 * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
 * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
 */
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
 __kernel void direct_convolution5x5(
     TENSOR3D_DECLARATION(src),
     TENSOR3D_DECLARATION(dst),
@@ -147,3 +148,166 @@ __kernel void direct_convolution5x5(
     vstore8(pixels0, 0, (__global DATA_TYPE *)dst.ptr);
 }
 #endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+
+#if defined(WEIGHTS_DEPTH)
+
+#define CONVOLUTION1x5_BIFROST(acc, src0, weights_row00, weights_row01) \
+    ({                                                                  \
+        acc.s0 = mad(src0.s0, weights_row00.s0, acc.s0);                \
+        acc.s1 = mad(src0.s1, weights_row00.s0, acc.s1);                \
+        acc.s2 = mad(src0.s2, weights_row00.s0, acc.s2);                \
+        acc.s3 = mad(src0.s3, weights_row00.s0, acc.s3);                \
+        acc.s0 = mad(src0.s1, weights_row00.s1, acc.s0);                \
+        acc.s1 = mad(src0.s2, weights_row00.s1, acc.s1);                \
+        acc.s2 = mad(src0.s3, weights_row00.s1, acc.s2);                \
+        acc.s3 = mad(src0.s4, weights_row00.s1, acc.s3);                \
+        acc.s0 = mad(src0.s2, weights_row00.s2, acc.s0);                \
+        acc.s1 = mad(src0.s3, weights_row00.s2, acc.s1);                \
+        acc.s2 = mad(src0.s4, weights_row00.s2, acc.s2);                \
+        acc.s3 = mad(src0.s5, weights_row00.s2, acc.s3);                \
+        acc.s0 = mad(src0.s3, weights_row00.s3, acc.s0);                \
+        acc.s1 = mad(src0.s4, weights_row00.s3, acc.s1);                \
+        acc.s2 = mad(src0.s5, weights_row00.s3, acc.s2);                \
+        acc.s3 = mad(src0.s6, weights_row00.s3, acc.s3);                \
+        acc.s0 = mad(src0.s4, weights_row01, acc.s0);                   \
+        acc.s1 = mad(src0.s5, weights_row01, acc.s1);                   \
+        acc.s2 = mad(src0.s6, weights_row01, acc.s2);                   \
+        acc.s3 = mad(src0.s7, weights_row01, acc.s3);                   \
+    })
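As with the 3x3 macro, a scalar model makes the access pattern easier to check. The sketch below (illustrative C++, with the macro's vector lanes unrolled into loops) assumes the 5 taps of one weight row arrive as a 4-element array plus a lone scalar, matching the vload4 + scalar load in the kernel body:

#include <array>

std::array<float, 4> conv1x5_tile(const std::array<float, 8> &src,
                                  const std::array<float, 4> &w, float w4,
                                  std::array<float, 4> acc)
{
    for(int i = 0; i < 4; ++i)     // acc.s0 .. acc.s3
    {
        for(int k = 0; k < 4; ++k) // taps held in the float4
        {
            acc[i] = src[i + k] * w[k] + acc[i]; // mirrors mad()
        }
        acc[i] = src[i + 4] * w4 + acc[i];       // fifth, scalar tap
    }
    return acc;
}

Splitting the odd-width row into float4 + float keeps every load naturally aligned while still covering all five taps against the 8-wide source row.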
+
+/** An optimized direct convolution 5x5 OpenCL kernel for Bifrost architectures when the data type is F32
+ *
+ * @note This OpenCL kernel works only with stride_x and stride_y equal to 1
+ * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
+ * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution5x5_f32_bifrost(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+    VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+    unsigned int weights_stride_w)
+{
+    // Get the kernel index
+    const int kernel_index = get_global_id(2);
+
+    Image    src = CONVERT_TO_IMAGE_STRUCT(src);
+    Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+    float4 pixels0 = 0.0f;
+    float4 pixels1 = 0.0f;
+
+    __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
+    __global uchar *src_addr     = (__global uchar *)offset(&src, 0, 0);
+
+    // Note: Since each work-item computes 4x2 elements, we need to load 6 rows from the input tensor
+
+    for(ushort d = 0; d < (ushort)WEIGHTS_DEPTH; ++d)
+    {
+        // Load the weights from row0 and row1
+        float4 weights_row00 = vload4(0, (__global float *)(weights_addr + 0 * weights_stride_y));
+        float  weights_row01 = *((__global float *)(weights_addr + 0 * weights_stride_y) + 4);
+        float4 weights_row10 = vload4(0, (__global float *)(weights_addr + 1 * weights_stride_y));
+        float  weights_row11 = *((__global float *)(weights_addr + 1 * weights_stride_y) + 4);
+        float8 src0;
+
+        // Load values from row0 of input tensor
+        src0 = vload8(0, (__global float *)(src_addr + 0 * src_stride_y));
+
+        // Accumulate
+        CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row00, weights_row01);
+
+        // Load values from row1 of input tensor
+        src0 = vload8(0, (__global float *)(src_addr + 1 * src_stride_y));
+
+        // Accumulate
+        CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row10, weights_row11);
+        CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row00, weights_row01);
+
+        // Load values from row2 of input tensor
+        src0 = vload8(0, (__global float *)(src_addr + 2 * src_stride_y));
+
+        // Load weights from row2
+        weights_row00 = vload4(0, (__global float *)(weights_addr + 2 * weights_stride_y));
+        weights_row01 = *((__global float *)(weights_addr + 2 * weights_stride_y) + 4);
+
+        // Accumulate
+        CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row00, weights_row01);
+        CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row10, weights_row11);
+
+        // Load values from row3 of input tensor
+        src0 = vload8(0, (__global float *)(src_addr + 3 * src_stride_y));
+
+        // Load weights from row3
+        weights_row10 = vload4(0, (__global float *)(weights_addr + 3 * weights_stride_y));
+        weights_row11 = *((__global float *)(weights_addr + 3 * weights_stride_y) + 4);
+
+        // Accumulate
+        CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row10, weights_row11);
+        CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row00, weights_row01);
+
+        // Load values from row4 of input tensor
+        src0 = vload8(0, (__global float *)(src_addr + 4 * src_stride_y));
+
+        // Load weights from row4
+        weights_row00 = vload4(0, (__global float *)(weights_addr + 4 * weights_stride_y));
+        weights_row01 = *((__global float *)(weights_addr + 4 * weights_stride_y) + 4);
+
+        CONVOLUTION1x5_BIFROST(pixels0, src0, weights_row00, weights_row01);
+        CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row10, weights_row11);
+
+        // Load values from row5 of input tensor
+        src0 = vload8(0, (__global float *)(src_addr + 5 * src_stride_y));
+
+        // Accumulate
+        CONVOLUTION1x5_BIFROST(pixels1, src0, weights_row00, weights_row01);
+
+        src_addr += src_stride_z;
+        weights_addr += weights_stride_z;
+    }
+
+#ifdef HAS_BIAS
+    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+
+    float4 bias = (float4) * ((__global float *)(vector_offset(&biases, kernel_index)));
+
+    pixels0 += bias;
+    pixels1 += bias;
+#endif /* defined(HAS_BIAS) */
+
+    vstore4(pixels0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
+    vstore4(pixels1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
+}
+#endif // defined(WEIGHTS_DEPTH)
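Both Bifrost kernels compute the same sums as the generic ones, only tiled so that every input row is loaded once and fed to each output row whose window covers it. For validating either kernel, a naive host-side reference is enough; the function below is a hypothetical sketch assuming src is laid out as [depth][height][width] and w as [depth][k][k]:

// Reference value of one output pixel at (x, y): sum over the WEIGHTS_DEPTH
// input channels of a k x k window, stride 1, no border handling.
float ref_pixel(const float *src, const float *w, int src_w, int src_h,
                int k, int depth, int x, int y)
{
    float acc = 0.0f;
    for(int z = 0; z < depth; ++z) // the WEIGHTS_DEPTH loop in the kernels
    {
        const float *src_z = src + z * src_w * src_h;
        const float *w_z   = w + z * k * k;
        for(int r = 0; r < k; ++r)
        {
            for(int c = 0; c < k; ++c)
            {
                acc += src_z[(y + r) * src_w + (x + c)] * w_z[r * k + c];
            }
        }
    }
    return acc;
}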
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index 1620d545c7..b012f59bd2 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -99,68 +99,129 @@ void CLDirectConvolutionLayerKernel::configure(const ICLTensor *input, const ICL
     _biases = biases;
     _border_size = BorderSize(_conv_pad_y, _conv_pad_x);
 
-    std::stringstream kernel_name;
     std::set<std::string> options;
-    kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
-    DataType promoted_type = input->info()->data_type();
 
-    options.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
-    options.emplace("-DDATA_SIZE=" + get_data_size_from_data_type(input->info()->data_type()));
-    options.emplace("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(2)));
-    options.emplace("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
+    const GPUTarget gpu_target = get_arch_from_target(get_target());
 
-    if(is_data_type_fixed_point(input->info()->data_type()))
+    if(_biases != nullptr)
+    {
+        options.emplace("-DHAS_BIAS");
+    }
+
+    if((gpu_target == GPUTarget::BIFROST) && (kernel_size <= 5) && (_conv_stride_x == 1) && (_conv_stride_y == 1) && (input->info()->data_type() == DataType::F32))
     {
-        options.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
+        options.emplace("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(2)));
+
+        std::string kernel_name = "direct_convolution" + support::cpp11::to_string(kernel_size) + "x" + support::cpp11::to_string(kernel_size) + "_f32_bifrost";
+        _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, options));
 
-        switch(input->info()->data_type())
+        // Configure kernel window
+        Window win = calculate_max_window(*output->info());
+
+        unsigned int num_elems_read_per_iteration_x    = 0;
+        unsigned int num_elems_read_per_iteration_y    = 0;
+        unsigned int num_elems_written_per_iteration_x = 0;
+        unsigned int num_elems_written_per_iteration_y = 0;
+
+        switch(kernel_size)
         {
-            case DataType::QS8:
-                promoted_type = DataType::QS16;
+            case 3:
+            {
+                num_elems_read_per_iteration_x    = 6;
+                num_elems_read_per_iteration_y    = 5;
+                num_elems_written_per_iteration_x = 4;
+                num_elems_written_per_iteration_y = 3;
                 break;
-            case DataType::QS16:
-                promoted_type = DataType::QS32;
+            }
+            case 5:
+            {
+                num_elems_read_per_iteration_x    = 8;
+                num_elems_read_per_iteration_y    = 6;
+                num_elems_written_per_iteration_x = 4;
+                num_elems_written_per_iteration_y = 2;
                 break;
+            }
             default:
-                ARM_COMPUTE_ERROR("Datatype not supported");
+            {
+                ARM_COMPUTE_ERROR("Kernel size not optimized for Bifrost");
+            }
         }
-    }
 
-    options.emplace("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(promoted_type));
+        // Calculate right and bottom border
+        const int input_width  = input->info()->dimension(0) - kernel_size / 2 + _conv_pad_x;
+        const int input_height = input->info()->dimension(1) - kernel_size / 2 + _conv_pad_y;
 
-    if(_biases != nullptr)
-    {
-        options.emplace("-DHAS_BIAS");
+        // Create window and update padding
+        win = calculate_max_window(*output->info(), Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
+
+        AccessWindowStatic    input_access(input->info(), -_conv_pad_x, -_conv_pad_y, input_width + num_elems_read_per_iteration_x, input_height + num_elems_read_per_iteration_y);
+        AccessWindowStatic    weights_access(weights->info(), 0, 0, kernel_size, kernel_size);
+        AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
+
+        update_window_and_padding(win, input_access, weights_access, output_access);
+
+        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+        ICLKernel::configure(win);
     }
+    else
+    {
+        std::stringstream kernel_name;
+        kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
+        DataType promoted_type = input->info()->data_type();
 
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), options));
+        options.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+        options.emplace("-DDATA_SIZE=" + get_data_size_from_data_type(input->info()->data_type()));
+        options.emplace("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(2)));
+        options.emplace("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
 
-    // Configure kernel window
-    Window win = calculate_max_window(*output->info());
+        if(is_data_type_fixed_point(input->info()->data_type()))
+        {
+            options.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
+
+            switch(input->info()->data_type())
+            {
+                case DataType::QS8:
+                    promoted_type = DataType::QS16;
+                    break;
+                case DataType::QS16:
+                    promoted_type = DataType::QS32;
+                    break;
+                default:
+                    ARM_COMPUTE_ERROR("Datatype not supported");
+            }
+        }
+
+        options.emplace("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(promoted_type));
+
+        _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name.str(), options));
 
+        // Configure kernel window
 
-    bool is_stride2 = ((kernel_size != 1) && (_conv_stride_x == 2));
+        bool is_stride2 = ((kernel_size != 1) && (_conv_stride_x == 2));
 
-    const unsigned int num_elems_read_per_iteration_x    = 8 + 2 * (kernel_size / 2) + (is_stride2 ? 6 + kernel_size / 2 : 0);
-    const unsigned int num_elems_read_per_iteration_y    = kernel_size;
-    const unsigned int num_elems_written_per_iteration_x = 8;
-    const unsigned int num_elems_written_per_iteration_y = 1;
+        const unsigned int num_elems_read_per_iteration_x    = 8 + 2 * (kernel_size / 2) + (is_stride2 ? 6 + kernel_size / 2 : 0);
+        const unsigned int num_elems_read_per_iteration_y    = kernel_size;
+        const unsigned int num_elems_written_per_iteration_x = 8;
+        const unsigned int num_elems_written_per_iteration_y = 1;
 
-    // Calculate right and bottom border
-    const int input_width  = input->info()->dimension(0) - kernel_size / 2 + _conv_pad_x;
-    const int input_height = input->info()->dimension(1) - kernel_size / 2 + _conv_pad_y;
+        // Calculate right and bottom border
+        const int input_width  = input->info()->dimension(0) - kernel_size / 2 + _conv_pad_x;
+        const int input_height = input->info()->dimension(1) - kernel_size / 2 + _conv_pad_y;
 
-    // Create window and update padding
-    win = calculate_max_window(*output->info(), Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
+        // Create window and update padding
+        Window win = calculate_max_window(*output->info(), Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
 
-    AccessWindowStatic input_access(input->info(), -_conv_pad_x, -_conv_pad_y, input_width + num_elems_read_per_iteration_x, input_height + num_elems_read_per_iteration_y);
-    AccessWindowStatic weights_access(weights->info(), 0, 0, kernel_size, kernel_size);
-    AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
+        AccessWindowStatic    input_access(input->info(), -_conv_pad_x, -_conv_pad_y, input_width + num_elems_read_per_iteration_x, input_height + num_elems_read_per_iteration_y);
+        AccessWindowStatic    weights_access(weights->info(), 0, 0, kernel_size, kernel_size);
+        AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
 
-    update_window_and_padding(win, input_access, weights_access, output_access);
+        update_window_and_padding(win, input_access, weights_access, output_access);
 
-    output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
+        output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
 
-    ICLKernel::configure(win);
+        ICLKernel::configure(win);
+    }
 }
 
 void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue &queue)
--
cgit v1.2.1
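In summary, configure() now forks on a single condition. The predicate below restates it as a free function for readability; GPUTarget, DataType and the tested values come from the patch, while the function itself does not exist in the library:

// Hypothetical restatement of the new dispatch in configure().
bool picks_bifrost_path(GPUTarget arch, unsigned int kernel_size,
                        unsigned int stride_x, unsigned int stride_y, DataType dt)
{
    // Only kernel_size 3 and 5 have Bifrost variants; any other size that
    // enters this branch hits ARM_COMPUTE_ERROR("Kernel size not optimized
    // for Bifrost") in the switch above.
    return (arch == GPUTarget::BIFROST) && (kernel_size <= 5)
           && (stride_x == 1) && (stride_y == 1) && (dt == DataType::F32);
}

Everything else falls through to the generic direct_convolution kernels, whose window computation is kept unchanged in the else branch.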