From e73686ac797be2d19cd9bed26d690e1431e3d848 Mon Sep 17 00:00:00 2001 From: Usama Arif Date: Mon, 8 Apr 2019 17:30:48 +0100 Subject: COMPMID-2047: Add support for dilation in CLDepthwiseConvolution. Change-Id: I3106aa34bd168985a56791613d95072756be6e9b Signed-off-by: Usama Arif Reviewed-on: https://review.mlplatform.org/c/958 Comments-Addressed: Arm Jenkins Reviewed-by: Pablo Marquez Tested-by: Arm Jenkins --- src/core/CL/cl_kernels/depthwise_convolution.cl | 363 +++++++++++++++++++-- .../cl_kernels/depthwise_convolution_quantized.cl | 171 +++++++--- .../CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp | 29 +- .../CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp | 51 +-- src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp | 16 +- 5 files changed, 525 insertions(+), 105 deletions(-) (limited to 'src/core') diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl index 4f6fdfafee..8ee0185fe6 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution.cl @@ -51,13 +51,18 @@ inline float2 convolution1x3_stride_1(__global const uchar *left_pixel, const float middle_coeff, const float right_coeff) { +#if(DILATION_X == 1 && DILATION_Y == 1) float4 temp = vload4(0, (__global float *)left_pixel); float2 left = CONVERT(temp.s01, float2); float2 middle = CONVERT(temp.s12, float2); float2 right = CONVERT(temp.s23, float2); - return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff; +#else /* DILATION_X==1 && DILATION_Y==1 */ + return vload2(0, (__global float *)left_pixel) * (float2)left_coeff + + vload2(0, (__global float *)(left_pixel) + DILATION_X) * (float2)middle_coeff + + vload2(0, (__global float *)(left_pixel) + 2 * DILATION_X) * (float2)right_coeff; +#endif /* DILATION_X==1 && DILATION_Y==1 */ } /** Compute a 1D horizontal convolution of size 3 and stride 2 for floating point type. @@ -74,6 +79,7 @@ inline float2 convolution1x3_stride_2(__global const uchar *left_pixel, const float middle_coeff, const float right_coeff) { +#if(DILATION_X == 1 && DILATION_Y == 1) float4 temp0 = vload4(0, (__global float *)left_pixel); float temp1 = *((__global float *)(left_pixel + 4 * sizeof(float))); @@ -82,6 +88,14 @@ inline float2 convolution1x3_stride_2(__global const uchar *left_pixel, float2 right = CONVERT((float2)(temp0.s2, temp1), float2); return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff; +#else /* DILATION_X==1 && DILATION_Y==1 */ + __global float *left_pixel_float = (__global float *)left_pixel; + + return vload4(0, left_pixel_float).s02 * (float2)left_coeff + + vload4(0, left_pixel_float + DILATION_X).s02 * (float2)middle_coeff + + vload4(0, left_pixel_float + DILATION_X * 2).s02 * (float2)right_coeff; + +#endif /* DILATION_X==1 && DILATION_Y==1 */ } /** Compute a 1D horizontal convolution of size 3 and stride 3 for floating point type. 
@@ -98,6 +112,7 @@ inline float2 convolution1x3_stride_3(__global const uchar *left_pixel, const float middle_coeff, const float right_coeff) { +#if(DILATION_X == 1 && DILATION_Y == 1) float4 temp0 = vload4(0, (__global float *)left_pixel); float2 temp1 = vload2(0, (__global float *)(left_pixel + 4 * sizeof(float))); @@ -106,6 +121,13 @@ inline float2 convolution1x3_stride_3(__global const uchar *left_pixel, float2 right = CONVERT((float2)(temp0.s2, temp1.s1), float2); return left * (float2)left_coeff + middle * (float2)middle_coeff + right * (float2)right_coeff; +#else /* DILATION_X==1 && DILATION_Y==1 */ + __global float *left_pixel_float = (__global float *)left_pixel; + + return (float2)(*left_pixel_float, *(left_pixel_float + 3)) * (float2)left_coeff + + (float2)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 3)) * (float2)middle_coeff + + (float2)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 3)) * (float2)right_coeff; +#endif /* DILATION_X==1 && DILATION_Y==1 */ } /** Apply a 3x3 convolution matrix to a single channel F32 input image and return the result. @@ -139,8 +161,8 @@ inline float2 convolution3x3( float2 pixels; pixels = convolution1x3(offset(src, 0, 0), mat0, mat1, mat2); - pixels += convolution1x3(offset(src, 0, 1), mat3, mat4, mat5); - pixels += convolution1x3(offset(src, 0, 2), mat6, mat7, mat8); + pixels += convolution1x3(offset(src, 0, DILATION_Y), mat3, mat4, mat5); + pixels += convolution1x3(offset(src, 0, DILATION_Y * 2), mat6, mat7, mat8); return pixels; } @@ -216,6 +238,8 @@ __kernel void depthwise_convolution_3x3( } #endif //defined(CONV_STRIDE_X) +#if(DILATION_X == 1 && DILATION_Y == 1) + #define CONVOLUTION1x3_BIFROST2X1_STRIDE1(acc, src0, weights_row0) \ ({ \ acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ @@ -268,6 +292,227 @@ __kernel void depthwise_convolution_3x3( acc.s3 = fma(src1.s0, weights_row0.s2, acc.s3); \ }) +#else /* DILATION_X==1 && DILATION_Y==1 */ + +#define CONVOLUTION1x3_BIFROST2X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \ + }) + +#define CONVOLUTION1x3_BIFROST2X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \ + }) + +#define CONVOLUTION1x3_BIFROST4X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \ + acc.s2 = fma(src0_left.s2, weights_row0.s0, acc.s2); \ + acc.s2 = fma(src0_mid.s2, weights_row0.s1, acc.s2); \ + acc.s2 = fma(src0_right.s2, weights_row0.s2, acc.s2); \ + acc.s3 = fma(src0_left.s3, weights_row0.s0, 
acc.s3); \ + acc.s3 = fma(src0_mid.s3, weights_row0.s1, acc.s3); \ + acc.s3 = fma(src0_right.s3, weights_row0.s2, acc.s3); \ + }) + +#define CONVOLUTION1x3_BIFROST4X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \ + acc.s2 = fma(src0_left.s4, weights_row0.s0, acc.s2); \ + acc.s2 = fma(src0_mid.s4, weights_row0.s1, acc.s2); \ + acc.s2 = fma(src0_right.s4, weights_row0.s2, acc.s2); \ + acc.s3 = fma(src0_left.s6, weights_row0.s0, acc.s3); \ + acc.s3 = fma(src0_mid.s6, weights_row0.s1, acc.s3); \ + acc.s3 = fma(src0_right.s6, weights_row0.s2, acc.s3); \ + }) + +/** Get the pointer position at a certain offset in x and y direction. + * + * @param[in] ptr Pointer to the starting position of the buffer + * @param[in] x Relative X position + * @param[in] y Relative Y position + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + */ +inline __global uchar *ptr_offset(__global uchar *ptr, const int x, const int y, const int stride_x, const int stride_y) +{ + return ptr + x * stride_x + y * stride_y; +} + +/** Perform 3x3 convolution for stride_x=1 and stride_y=1 when DILATION_X>1 and DILATION_Y>1 for F32 + * + * @param[in] src_addr Pointer to the starting position of where to perform the convolution + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] y_offset Offset from the source tensor from which to start convolution + * @param[in] weights_addr Pointer from where to get weights + * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension + */ +inline float2 convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +{ + // Load the weights + float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y)); + float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y)); + float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y)); + + float2 pixels0 = 0.0f; + + float2 src00_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0 + float2 src00_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + float2 src00_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + + float2 src10_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1 + float2 src10_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + float2 src10_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + + float2 src20_left = vload2(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, 
stride_x_bytes, stride_y_bytes)); // Row2 + float2 src20_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + float2 src20_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + + CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2); + + return pixels0; +} + +/** Perform 3x3 convolution for stride_x=2 and stride_y=2 when DILATION_X>1 and DILATION_Y>1 for F32 + * + * @param[in] src_addr Pointer to the starting position of where to perform the convolution + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] y_offset Offset from the source tensor from which to start convolution + * @param[in] weights_addr Pointer from where to get weights + * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension + */ +inline float2 convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +{ + // Load the weights + float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y)); + float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y)); + float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y)); + + float2 pixels0 = 0.0f; + + float3 src00_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0 + float3 src00_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + float3 src00_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + + float3 src10_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1 + float3 src10_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + float3 src10_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + + float3 src20_left = vload3(0, (__global float *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2 + float3 src20_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + float3 src20_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + + CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2); + + return pixels0; +} + +/** Perform 3x3 convolution for stride_x=1 and stride_y=1 when DILATION_X>1 and DILATION_Y>1 for f16 + * + * @param[in] src_addr Pointer to the starting position of where to perform the 
convolution + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] y_offset Offset from the source tensor from which to start convolution + * @param[in] weights_addr Pointer from where to get weights + * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension + */ +inline half4 convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +{ + // Load the weights + half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y)); + half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y)); + half3 weights_row2 = vload3(0, (__global half *)(weights_addr + 2 * weights_stride_y)); + + half4 pixels0 = 0.0f; + + half4 src00_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0 + half4 src00_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + half4 src00_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + + half4 src10_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1 + half4 src10_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + half4 src10_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + + half4 src20_left = vload4(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2 + half4 src20_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + half4 src20_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + + CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2); + + return pixels0; +} + +/** Perform 3x3 convolution for stride_x=2 and stride_y=2 when DILATION_X>1 and DILATION_Y>1 for F16 + * + * @param[in] src_addr Pointer to the starting position of where to perform the convolution + * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] y_offset Offset from the source tensor from which to start convolution + * @param[in] weights_addr Pointer from where to get weights + * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension + */ +inline half4 convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +{ + // Load the weights + half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y)); + half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y)); + half3 weights_row2 = vload3(0, 
(__global half *)(weights_addr + 2 * weights_stride_y)); + + half4 pixels0 = 0.0f; + + half8 src00_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset, stride_x_bytes, stride_y_bytes)); // Row0 + half8 src00_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + half8 src00_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset, stride_x_bytes, stride_y_bytes)); + + half8 src10_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); // Row1 + half8 src10_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + half8 src10_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y, stride_x_bytes, stride_y_bytes)); + + half8 src20_left = vload8(0, (__global half *)ptr_offset(src_addr, 0, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); // Row2 + half8 src20_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + half8 src20_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); + + CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2); + + return pixels0; +} + +#endif /* DILATION_X==1 && DILATION_Y==1 */ + /** This OpenCL kernel is optimized for Bifrost architectures and computes the depthwise convolution 3x3 when both * stride_x and stride_y are equal to 1 * @@ -326,6 +571,7 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32( __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z; __global uchar *src_addr = src.ptr - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z; +#if(DILATION_X == 1 && DILATION_Y == 1) // Load the weights float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y)); float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y)); @@ -352,6 +598,19 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32( CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src40, weights_row1); CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src50, weights_row2); +#else /* DILATION_X==1 && DILATION_Y==1 */ + + //3x3 Convolution of elements starting in 0th row + pixels0 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src.stride_x, src.stride_y, 0, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 1st row + pixels1 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src.stride_x, src.stride_y, 1, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 2nd row + pixels2 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 3rd row + pixels3 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src.stride_x, src.stride_y, 3, weights_addr, weights_stride_y); + +#endif /* DILATION_X==1 && 
DILATION_Y==1 */ + #ifdef HAS_BIAS Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); @@ -425,6 +684,8 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32( __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z; __global uchar *src_addr = src.ptr - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z; +#if(DILATION_X == 1 && DILATION_Y == 1) + // Load the weights float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y)); float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y)); @@ -449,6 +710,14 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32( CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src30, src31, weights_row1); CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src40, src41, weights_row2); +#else /* DILATION_X==1 && DILATION_Y==1 */ + + //3x3 Convolution of elements starting in 0th row + pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(src_addr, src.stride_x, src.stride_y, 0, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 2nd row + pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y); +#endif /* DILATION_X==1 && DILATION_Y==1 */ + #ifdef HAS_BIAS Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); @@ -632,11 +901,12 @@ __kernel void depthwise_convolution_reshape_weights_generic( } #endif //defined(SRC_WIDTH) && defined(DATA_TYPE) -#if defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE) && defined(PAD_VALUE) && defined(DEPTH_MULTIPLIER) +#if defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE) && defined(PAD_VALUE) && defined(DEPTH_MULTIPLIER) && defined(DILATION_X) && defined(DILATION_Y) /** This kernel performs a reshaping of the input tensor to a tensor used to perform depthwise convolution using vector to matrix multiplication. * * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float * @note The convolution information must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y, -DPAD_LEFT, -DPAD_TOP, -DPAD_RIGHT, -DPAD_BOTTOM, -DKERNEL_WIDHT, -DKERNEL_HEIGHT, -DSRC_WIDTH, -DSRC_HEIGHT, -DDEPTH_MULTIPLIER + * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1 * * @param[in] src_ptr Pointer to the source tensor. 
Supported data types: F16/F32 * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) @@ -661,7 +931,7 @@ __kernel void depthwise_im2col(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(d const int src_pixel_linear = get_global_id(1) * STRIDE_X; const int full_length = SRC_WIDTH + PAD_LEFT + PAD_RIGHT; - const int max_initial_x = STRIDE_X * (((full_length - KERNEL_WIDTH) / STRIDE_X) + 1); + const int max_initial_x = STRIDE_X * (((full_length - (KERNEL_WIDTH + (KERNEL_WIDTH - 1) * (DILATION_X - 1))) / STRIDE_X) + 1); const int src_x = -PAD_LEFT + src_pixel_linear % max_initial_x; const int src_y = -PAD_TOP + src_pixel_linear / max_initial_x * STRIDE_Y; @@ -670,9 +940,9 @@ __kernel void depthwise_im2col(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(d __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + src_z * in_stride_z; __global DATA_TYPE *output_ptr = ((__global DATA_TYPE *)(dst.ptr)); - for(int y = src_y; y < src_y + KERNEL_HEIGHT; ++y) + for(int y = src_y; y < src_y + KERNEL_HEIGHT + (KERNEL_HEIGHT - 1) * (DILATION_Y - 1); y += DILATION_Y) { - for(int x = src_x; x < src_x + KERNEL_WIDTH; ++x, ++output_ptr) + for(int x = src_x; x < src_x + KERNEL_WIDTH + (KERNEL_WIDTH - 1) * (DILATION_X - 1); x += DILATION_X, ++output_ptr) { if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT) { @@ -754,6 +1024,8 @@ inline half4 convolution1x3_stride_1_f16(__global const uchar *left_pixel, const half middle_coeff, const half right_coeff) { +#if(DILATION_X == 1 && DILATION_Y == 1) + half8 temp = vload8(0, (__global half *)left_pixel); half4 left = CONVERT(temp.s0123, half4); @@ -761,6 +1033,12 @@ inline half4 convolution1x3_stride_1_f16(__global const uchar *left_pixel, half4 right = CONVERT(temp.s2345, half4); return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff; +#else /* DILATION_X==1 && DILATION_Y==1 */ + return vload4(0, (__global half *)left_pixel) * (half4)left_coeff + + vload4(0, (__global half *)(left_pixel) + DILATION_X) * (half4)middle_coeff + + vload4(0, (__global half *)(left_pixel) + 2 * DILATION_X) * (half4)right_coeff; + +#endif /* DILATION_X==1 && DILATION_Y==1 */ } /** Compute a 1D horizontal convolution of size 3 and stride 2 for 16bit floating point type. 
@@ -777,6 +1055,8 @@ inline half4 convolution1x3_stride_2_f16(__global const uchar *left_pixel, const half middle_coeff, const half right_coeff) { +#if(DILATION_X == 1 && DILATION_Y == 1) + half8 temp0 = vload8(0, (__global half *)left_pixel); half temp1 = *((__global half *)(left_pixel + 8 * sizeof(half))); @@ -785,6 +1065,15 @@ inline half4 convolution1x3_stride_2_f16(__global const uchar *left_pixel, half4 right = CONVERT((half4)(temp0.s246, temp1), half4); return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff; +#else /* DILATION_X==1 && DILATION_Y==1 */ + + __global half *left_pixel_float = (__global half *)left_pixel; + + return (half4)(*left_pixel_float, *(left_pixel_float + 2), *(left_pixel_float + 4), *(left_pixel_float + 6)) * (half4)left_coeff + + (half4)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 2), *(left_pixel_float + DILATION_X + 4), *(left_pixel_float + DILATION_X + 6)) * (half4)middle_coeff + + (half4)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 2), *(left_pixel_float + DILATION_X * 2 + 4), *(left_pixel_float + DILATION_X * 2 + 6)) * (half4)right_coeff; + +#endif /* DILATION_X==1 && DILATION_Y==1 */ } /** Compute a 1D horizontal convolution of size 3 and stride 3 for 16bit floating point type. @@ -801,6 +1090,8 @@ inline half4 convolution1x3_stride_3_f16(__global const uchar *left_pixel, const half middle_coeff, const half right_coeff) { +#if(DILATION_X == 1 && DILATION_Y == 1) + half16 temp0 = vload16(0, (__global half *)left_pixel); half4 left = CONVERT(temp0.s0369, half4); @@ -808,6 +1099,15 @@ inline half4 convolution1x3_stride_3_f16(__global const uchar *left_pixel, half4 right = CONVERT(temp0.s258B, half4); return left * (half4)left_coeff + middle * (half4)middle_coeff + right * (half4)right_coeff; +#else /* DILATION_X==1 && DILATION_Y==1 */ + + __global half *left_pixel_float = (__global half *)left_pixel; + + return (half4)(*left_pixel_float, *(left_pixel_float + 3), *(left_pixel_float + 6), *(left_pixel_float + 9)) * (half4)left_coeff + + (half4)(*(left_pixel_float + DILATION_X), *(left_pixel_float + DILATION_X + 3), *(left_pixel_float + DILATION_X + 6), *(left_pixel_float + DILATION_X + 9)) * (half4)middle_coeff + + (half4)(*(left_pixel_float + DILATION_X * 2), *(left_pixel_float + DILATION_X * 2 + 3), *(left_pixel_float + DILATION_X * 2 + 6), *(left_pixel_float + DILATION_X * 2 + 9)) * (half4)right_coeff; + +#endif /* DILATION_X==1 && DILATION_Y==1 */ } /** Apply a 3x3 convolution matrix to a single channel F16 input image and return the result. 
@@ -841,8 +1141,8 @@ inline half4 convolution3x3_f16( half4 pixels; pixels = convolution1x3_f16(offset(src, 0, 0), mat0, mat1, mat2); - pixels += convolution1x3_f16(offset(src, 0, 1), mat3, mat4, mat5); - pixels += convolution1x3_f16(offset(src, 0, 2), mat6, mat7, mat8); + pixels += convolution1x3_f16(offset(src, 0, DILATION_Y), mat3, mat4, mat5); + pixels += convolution1x3_f16(offset(src, 0, DILATION_Y * 2), mat6, mat7, mat8); return pixels; } @@ -986,6 +1286,7 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16( __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z; __global uchar *src_addr = src.ptr - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z; +#if(DILATION_X == 1 && DILATION_Y == 1) // Load the weights half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y)); half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y)); @@ -1012,6 +1313,19 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16( CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src40, weights_row1); CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src50, weights_row2); +#else /* DILATION_X==1 && DILATION_Y==1 */ + + //3x3 Convolution of elements starting in 0th row + pixels0 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src.stride_x, src.stride_y, 0, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 1st row + pixels1 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src.stride_x, src.stride_y, 1, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 2nd row + pixels2 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 3rd row + pixels3 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src.stride_x, src.stride_y, 3, weights_addr, weights_stride_y); + +#endif /* DILATION_X==1 && DILATION_Y==1 */ + #ifdef HAS_BIAS pixels0 += (half4)bias; pixels1 += (half4)bias; @@ -1088,6 +1402,8 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16( __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z; __global uchar *src_addr = src.ptr - batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z - (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z; +#if(DILATION_X == 1 && DILATION_Y == 1) + // Load the weights half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y)); half3 weights_row1 = vload3(0, (__global half *)(weights_addr + 1 * weights_stride_y)); @@ -1112,6 +1428,13 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16( CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src30, src31, weights_row1); CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src40, src41, weights_row2); +#else /* DILATION_X==1 && DILATION_Y==1 */ + //3x3 Convolution of elements starting in 0th row + pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 0, weights_addr, weights_stride_y); + //3x3 Convolution of elements starting in 2nd row + pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 2, weights_addr, 
weights_stride_y); +#endif /* DILATION_X==1 && DILATION_Y==1 */ + #ifdef HAS_BIAS pixels0 += (half4)bias; pixels1 += (half4)bias; @@ -1189,9 +1512,9 @@ __kernel void depthwise_convolution_3x3_nhwc( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else // defined(DST_DEPTH) - int z = get_global_id(2); // spatial coordinate y -#endif // defined(DST_DEPTH) +#else // defined(DST_DEPTH) + int z = get_global_id(2); // spatial coordinate y +#endif // defined(DST_DEPTH) Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); @@ -1203,7 +1526,7 @@ __kernel void depthwise_convolution_3x3_nhwc( int z_coord = 0; int4 offset = 0; - int4 y_offset = ((int4)(y * CONV_STRIDE_X) + (int4)(0, 1, 2, 3) - CONV_PAD_LEFT) * (int4)src_stride_y; + int4 y_offset = ((int4)(y * CONV_STRIDE_X) + (int4)(0, DILATION_X * 1, DILATION_X * 2, DILATION_X * 3) - CONV_PAD_LEFT) * (int4)src_stride_y; // We compute 2x1x1 [C,W,H] elements VEC_FLOAT acc = 0; @@ -1236,16 +1559,16 @@ __kernel void depthwise_convolution_3x3_nhwc( // z == 1 // z_coord can be only negative for z = 0 so we do not need to clamp it // Moreover z_coord cannot be out-of-bound for z = 1 so we do not need to clamp the offset - z_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP + 1; + z_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y; offset = y_offset + (int4)(z_coord * src_stride_z); VEC_FLOAT values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); VEC_FLOAT values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); VEC_FLOAT values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); // z == 2 - // After z = 1 we can simply add src_stride_z to offset without updating z_coord - // However offset can be out-of-bound so we need to check if it is greater than max_offset - offset += (int4)src_stride_z; + // Offset can be out-of-bound so we need to check if it is greater than max_offset + z_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y * 2; + offset = y_offset + (int4)(z_coord * src_stride_z); offset = min(offset, (int4)max_offset); VEC_FLOAT values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); VEC_FLOAT values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); @@ -1338,9 +1661,9 @@ __kernel void depthwise_convolution_3x3_nhwc_stride1( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else // defined(DST_DEPTH) - int z = get_global_id(2); // spatial coordinate y -#endif // defined(DST_DEPTH) +#else // defined(DST_DEPTH) + int z = get_global_id(2); // spatial coordinate y +#endif // defined(DST_DEPTH) Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl index 503aa7e837..8d145a038e 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl @@ -53,6 +53,8 @@ #if !(defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)) +#if DILATION_X == 1 + #if CONV_STRIDE_X == 1 #define GET_VALUES(first_value, left, middle, right) \ ({ \ @@ -85,6 +87,46 @@ }) #endif /* CONV_STRIDE_X */ +#else /* DILATION_X == 1 */ + +#if CONV_STRIDE_X == 1 +#define GET_VALUES(first_value, left, middle, right) \ + ({ \ + left = CONVERT(vload8(0, first_value), int8); \ + 
middle = CONVERT(vload8(0, first_value + DILATION_X * sizeof(uchar)), int8); \ + right = CONVERT(vload8(0, first_value + 2 * DILATION_X * sizeof(uchar)), int8); \ + }) +#elif CONV_STRIDE_X == 2 +#define GET_VALUES(first_value, left, middle, right) \ + ({ \ + int16 temp0 = CONVERT(vload16(0, first_value), int16); \ + left = CONVERT(temp0.s02468ace, int8); \ + \ + temp0 = CONVERT(vload16(0, first_value + DILATION_X * sizeof(uchar)), int16); \ + middle = CONVERT(temp0.s02468ace, int8); \ + \ + temp0 = CONVERT(vload16(0, first_value + 2 * DILATION_X * sizeof(uchar)), int16); \ + right = CONVERT(temp0.s02468ace, int8); \ + }) +#else /* CONV_STRIDE_X */ +#define GET_VALUES(first_value, left, middle, right) \ + ({ \ + int16 temp0 = CONVERT(vload16(0, first_value), int16); \ + int8 temp1 = CONVERT(vload8(0, (first_value + 16 * sizeof(uchar))), int8); \ + left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ + \ + temp0 = CONVERT(vload16(0, first_value + DILATION_X * sizeof(uchar)), int16); \ + temp1 = CONVERT(vload8(0, (first_value + (16 + DILATION_X) * sizeof(uchar))), int8); \ + middle = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ + \ + temp0 = CONVERT(vload16(0, first_value + 2 * DILATION_X * sizeof(uchar)), int16); \ + temp1 = CONVERT(vload8(0, (first_value + (16 + 2 * DILATION_X) * sizeof(uchar))), int8); \ + right = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ + }) + +#endif /* CONV_STRIDE_X */ +#endif /* DILATION_X==1 */ + /** This function computes the depthwise convolution quantized. * * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8 @@ -151,10 +193,10 @@ __kernel void dwc_3x3_native_qasymm8_nchw( int8 values0 = 0; int8 sum0 = 0; -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 int8 values1 = 0; int8 sum1 = 0; -#endif /* CONV_STRIDE_Y */ +#endif /* CONV_STRIDE_Y &&DILATION_Y==1 */ // Row0 int8 left, middle, right; @@ -168,44 +210,44 @@ __kernel void dwc_3x3_native_qasymm8_nchw( #endif /* WEIGHTS_OFFSET != 0 */ // Row1 - GET_VALUES(src.ptr + 1 * src_stride_y, left, middle, right); + GET_VALUES(src.ptr + DILATION_Y * src_stride_y, left, middle, right); values0 += left * (int8)(w1.s0); values0 += middle * (int8)(w1.s1); values0 += right * (int8)(w1.s2); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += left * (int8)(w0.s0); values1 += middle * (int8)(w0.s1); values1 += right * (int8)(w0.s2); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y && DILATION_Y== 1 */ #if WEIGHTS_OFFSET != 0 int8 tmp = left + middle + right; sum0 += tmp; -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 sum1 += tmp; -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y &&DILATION_Y== 1 */ #endif /* WEIGHTS_OFFSET != 0 */ // Row2 - GET_VALUES(src.ptr + 2 * src_stride_y, left, middle, right); + GET_VALUES(src.ptr + 2 * DILATION_Y * src_stride_y, left, middle, right); values0 += left * (int8)(w2.s0); values0 += middle * (int8)(w2.s1); values0 += right * (int8)(w2.s2); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += left * (int8)(w1.s0); values1 += middle * (int8)(w1.s1); values1 += right * (int8)(w1.s2); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y &&DILATION_Y == 1 */ #if WEIGHTS_OFFSET != 0 tmp = left + middle + right; sum0 += tmp; -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 sum1 += tmp; -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ #endif /* WEIGHTS_OFFSET != 0 */ 
-#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 // Row3 GET_VALUES(src.ptr + 3 * src_stride_y, left, middle, right); values1 += left * (int8)(w2.s0); @@ -215,20 +257,20 @@ __kernel void dwc_3x3_native_qasymm8_nchw( #if WEIGHTS_OFFSET != 0 sum1 += left + middle + right; #endif /* WEIGHTS_OFFSET != 0 */ -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y && DILATION_Y == 1 */ #if defined(HAS_BIAS) values0 += (int8)(bias_value); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += (int8)(bias_value); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y & &DILATION_Y == 1 */ #endif //defined(HAS_BIAS) #if WEIGHTS_OFFSET != 0 values0 += sum0 * (int8)(WEIGHTS_OFFSET); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += sum1 * (int8)(WEIGHTS_OFFSET); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ #endif /* WEIGHTS_OFFSET != 0 */ #if INPUT_OFFSET != 0 @@ -236,16 +278,16 @@ __kernel void dwc_3x3_native_qasymm8_nchw( ushort3 tmp_we = convert_ushort3(w0) + convert_ushort3(w1) + convert_ushort3(w2); sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2; values0 += sum_weights * (int8)(INPUT_OFFSET); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += sum_weights * (int8)(INPUT_OFFSET); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ #endif /* INPUT_OFFSET != 0 */ #if K_OFFSET != 0 values0 += (int8)(K_OFFSET); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += (int8)(K_OFFSET); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ #endif /* K_OFFSET != 0 */ #if defined(REAL_MULTIPLIER) @@ -264,7 +306,7 @@ __kernel void dwc_3x3_native_qasymm8_nchw( res0 = min(res0, (uchar8)255); vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 #if defined(REAL_MULTIPLIER) values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8); @@ -281,11 +323,11 @@ __kernel void dwc_3x3_native_qasymm8_nchw( res1 = min(res1, (uchar8)255); vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ } #else // !(defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)) - +#if DILATION_X == 1 #if CONV_STRIDE_X == 1 #define GET_VALUES(first_value, left, middle, right) \ ({ \ @@ -317,6 +359,43 @@ __kernel void dwc_3x3_native_qasymm8_nchw( right = (uchar8)(temp0.s258b, temp0.se, temp1.s147); \ }) #endif /* CONV_STRIDE_X */ +#else /*DILATION_X==1*/ + +#if CONV_STRIDE_X == 1 +#define GET_VALUES(first_value, left, middle, right) \ + ({ \ + left = vload8(0, first_value); \ + middle = vload8(0, first_value + DILATION_X * sizeof(uchar)); \ + right = vload8(0, first_value + 2 * DILATION_X * sizeof(uchar)); \ + }) +#elif CONV_STRIDE_X == 2 +#define GET_VALUES(first_value, left, middle, right) \ + ({ \ + uchar16 temp0 = vload16(0, first_value); \ + left = temp0.s02468ace; \ + temp0 = vload16(0, first_value + DILATION_X * sizeof(uchar)); \ + middle = temp0.s02468ace; \ + temp0 = vload16(0, first_value + 2 * DILATION_X * sizeof(uchar)); \ + right = temp0.s02468ace; \ + }) +#else /* CONV_STRIDE_X */ +#define GET_VALUES(first_value, left, middle, right) \ + ({ \ + uchar16 temp0 = vload16(0, first_value); \ + uchar8 temp1 = vload8(0, (first_value + 16 * sizeof(uchar))); \ + left = (uchar8)(temp0.s0369, temp0.scf, temp1.s25); \ + \ + temp0 
= vload16(0, first_value + DILATION_X * sizeof(uchar)); \ + temp1 = vload8(0, (first_value + (16 + DILATION_X) * sizeof(uchar))); \ + middle = (uchar8)(temp0.s0369, temp0.scf, temp1.s25); \ + \ + temp0 = vload16(0, first_value + 2 * DILATION_X * sizeof(uchar)); \ + temp1 = vload8(0, (first_value + (16 + 2 * DILATION_X) * sizeof(uchar))); \ + right = (uchar8)(temp0.s0369, temp0.scf, temp1.s25); \ + }) + +#endif /* CONV_STRIDE_X */ +#endif /*DILATION_X==1*/ /** This function computes the depthwise convolution quantized using dot product when the data layout is NCHW. * * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8 @@ -389,8 +468,8 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( int8 sum0 = 0; GET_VALUES(src.ptr + 0 * src_stride_y, left0, middle0, right0); - GET_VALUES(src.ptr + 1 * src_stride_y, left1, middle1, right1); - GET_VALUES(src.ptr + 2 * src_stride_y, left2, middle2, right2); + GET_VALUES(src.ptr + DILATION_Y * src_stride_y, left1, middle1, right1); + GET_VALUES(src.ptr + 2 * DILATION_Y * src_stride_y, left2, middle2, right2); #if WEIGHTS_OFFSET != 0 sum0 += convert_int8(left0) + convert_int8(middle0) + convert_int8(right0); @@ -398,7 +477,7 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( sum0 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2); #endif /* WEIGHTS_OFFSET != 0 */ -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 // If conv_stride_y is equals to 1, we compute two output rows uchar8 left3, middle3, right3; @@ -412,7 +491,7 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( sum1 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2); sum1 += convert_int8(left3) + convert_int8(middle3) + convert_int8(right3); #endif /* WEIGHTS_OFFSET != 0 */ -#endif // CONV_STRIDE_Y == 1 +#endif // CONV_STRIDE_Y == 1 && DILATION_Y==1 ARM_DOT((uchar4)(left0.s0, middle0.s0, right0.s0, left1.s0), (uchar4)(w0.s0, w0.s1, w0.s2, w1.s0), values0.s0); ARM_DOT((uchar4)(middle1.s0, right1.s0, left2.s0, middle2.s0), (uchar4)(w1.s1, w1.s2, w2.s0, w2.s1), values0.s0); @@ -446,7 +525,7 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( ARM_DOT((uchar4)(middle1.s7, right1.s7, left2.s7, middle2.s7), (uchar4)(w1.s1, w1.s2, w2.s0, w2.s1), values0.s7); values0.s7 += right2.s7 * w2.s2; -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 ARM_DOT((uchar4)(left1.s0, middle1.s0, right1.s0, left2.s0), (uchar4)(w0.s0, w0.s1, w0.s2, w1.s0), values1.s0); ARM_DOT((uchar4)(middle2.s0, right2.s0, left3.s0, middle3.s0), (uchar4)(w1.s1, w1.s2, w2.s0, w2.s1), values1.s0); values1.s0 += right3.s0 * w2.s2; @@ -478,20 +557,20 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( ARM_DOT((uchar4)(left1.s7, middle1.s7, right1.s7, left2.s7), (uchar4)(w0.s0, w0.s1, w0.s2, w1.s0), values1.s7); ARM_DOT((uchar4)(middle2.s7, right2.s7, left3.s7, middle3.s7), (uchar4)(w1.s1, w1.s2, w2.s0, w2.s1), values1.s7); values1.s7 += right3.s7 * w2.s2; -#endif // CONV_STRIDE_Y == 1 +#endif // CONV_STRIDE_Y == 1 && DILATION_Y==1 #if defined(HAS_BIAS) values0 += (int8)(bias_value); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += (int8)(bias_value); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ #endif //defined(HAS_BIAS) #if WEIGHTS_OFFSET != 0 values0 += sum0 * (int8)(WEIGHTS_OFFSET); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += sum1 * (int8)(WEIGHTS_OFFSET); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ #endif 
/* WEIGHTS_OFFSET != 0 */ #if INPUT_OFFSET != 0 @@ -499,16 +578,16 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( ushort3 tmp_we = convert_ushort3(w0) + convert_ushort3(w1) + convert_ushort3(w2); sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2; values0 += sum_weights * (int8)(INPUT_OFFSET); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += sum_weights * (int8)(INPUT_OFFSET); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ #endif /* INPUT_OFFSET != 0 */ #if K_OFFSET != 0 values0 += (int8)(K_OFFSET); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 values1 += (int8)(K_OFFSET); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ #endif /* K_OFFSET != 0 */ #if defined(REAL_MULTIPLIER) @@ -527,7 +606,7 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( res0 = min(res0, (uchar8)255); vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr); -#if CONV_STRIDE_Y == 1 +#if CONV_STRIDE_Y == 1 && DILATION_Y == 1 #if defined(REAL_MULTIPLIER) @@ -545,7 +624,7 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw( res1 = min(res1, (uchar8)255); vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y); -#endif /* CONV_STRIDE_Y == 1 */ +#endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ } #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) @@ -669,7 +748,7 @@ __kernel void dwc_3x3_reshaped_qasymm8_nhwc( int z_coord = 0; int4 offset = 0; - int4 y_coord = ((int4)(y * CONV_STRIDE_X) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT; + int4 y_coord = ((int4)(y * CONV_STRIDE_X) + (int4)(0, DILATION_X * 1, DILATION_X * 2, DILATION_X * 3)) - (int)CONV_PAD_LEFT; // Only for y = 0 we can have a negative coordinate. If so, we convert it to SRC_DIM_1 y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); @@ -720,16 +799,16 @@ __kernel void dwc_3x3_reshaped_qasymm8_nhwc( // z == 1 // z_coord can be only negative for z = 0 so we do not need to clamp it // Moreover z_coord cannot be out-of-bound for z = 1 so we do not need to clamp the offset - z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + 1; + z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y; offset = y_offset + (int4)(z_coord * src_stride_z); VEC_UCHAR values3 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0); VEC_UCHAR values4 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1); VEC_UCHAR values5 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2); // z == 2 - // After z = 1 we can simply add src_stride_z to offset without updating z_coord - // However offset can be out-of-bound so we need to check if it is greater than max_offset - offset += (int4)src_stride_z; + // Offset can be out-of-bound so we need to check if it is greater than max_offset + z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y * 2; + offset = y_offset + (int4)(z_coord * src_stride_z); offset = min(offset, (int4)max_offset); VEC_UCHAR values6 = VLOAD(VEC_SIZE)(0, src_addr + offset.s0); VEC_UCHAR values7 = VLOAD(VEC_SIZE)(0, src_addr + offset.s1); diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp index 83fac16d88..ec27e419c4 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp @@ -43,7 +43,7 @@ using namespace arm_compute::misc::shape_calculator; namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const 
ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, const Size2D dilation) { ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); @@ -56,6 +56,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(0) != 3 || weights->dimension(1) != 3); ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 3); + ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1)); + const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type()); if(biases != nullptr) @@ -74,7 +76,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, if(output->total_size() != 0) { - const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier); + const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); } @@ -82,10 +84,10 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, } std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, - GPUTarget gpu_target, std::string &kernel_name) + GPUTarget gpu_target, std::string &kernel_name, const Size2D dilation) { // Output auto inizialitation if not yet initialized - const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier); + const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier, dilation); auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape)); const unsigned int conv_stride_x = conv_info.stride().first; @@ -176,10 +178,12 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen kernel_name += (is_qasymm ? "_nchw" : ""); num_elems_written_per_iteration_x = 8 / data_size_from_type(input->data_type()); - num_elems_written_per_iteration_y = (is_qasymm && conv_stride_y == 1) ? 2 : 1; + num_elems_written_per_iteration_y = (is_qasymm && conv_stride_y == 1 && dilation.y() == 1) ? 
2 : 1; num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x; num_elems_read_per_iteration_y = num_elems_written_per_iteration_y + 2; } + num_elems_read_per_iteration_x += (num_elems_read_per_iteration_x - 1) * (dilation.x() - 1); + num_elems_read_per_iteration_y += (num_elems_read_per_iteration_y - 1) * (dilation.y() - 1); // Create window and update padding Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y)); @@ -210,10 +214,10 @@ BorderSize CLDepthwiseConvolutionLayer3x3NCHWKernel::border_size() const } void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, - unsigned int depth_multiplier, ActivationLayerInfo act_info) + unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info)); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info, dilation)); bool is_qasymm = is_data_type_quantized_asymmetric(input->info()->data_type()); @@ -231,7 +235,7 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input, std::string kernel_name; const GPUTarget gpu_target = get_target(); - auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, gpu_target, kernel_name); + auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, gpu_target, kernel_name, dilation); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); ICLKernel::configure_internal(win_config.second); @@ -240,6 +244,8 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input, build_opts.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->tensor_shape().z())); build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier)); build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(_conv_stride_x)); + build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x())); + build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y())); build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS"); if(is_qasymm) @@ -292,12 +298,11 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const ICLTensor *input, } Status CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - unsigned int depth_multiplier, - ActivationLayerInfo act_info, GPUTarget gpu_target) + unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target, const Size2D &dilation) { std::string kernel_name; - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier, gpu_target, kernel_name).first); + 
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), conv_info, depth_multiplier, gpu_target, kernel_name, dilation).first); return Status{}; } diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp index 431039c31f..86d186b95e 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp @@ -42,7 +42,7 @@ namespace arm_compute namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier, - const ActivationLayerInfo &act_info) + const ActivationLayerInfo &act_info, const Size2D &dilation) { ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::QASYMM8); @@ -57,6 +57,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1); ARM_COMPUTE_RETURN_ERROR_ON(std::max(conv_info.pad_top(), conv_info.pad_bottom()) > 1); + ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1)); + const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type()); const size_t weights_width = 3; const size_t weights_height = 3; @@ -89,7 +91,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, if(output->total_size() != 0) { - const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, weights_width, weights_height, conv_info, depth_multiplier); + const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape( + *input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), conv_info, depth_multiplier, dilation); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape); } @@ -97,13 +100,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, } std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier) + const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation) { const size_t weights_width = 3; const size_t weights_height = 3; // Get convolved dimensions - const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, weights_width, weights_height, conv_info, depth_multiplier); + const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape( + *input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), conv_info, depth_multiplier, dilation); // Output auto inizialitation if not yet initialized auto_init_if_empty(*output, @@ -112,10 +116,10 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen input->data_type(), input->quantization_info()); - const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type()); - 
-    const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
+    const bool is_qasymm              = is_data_type_quantized_asymmetric(input->data_type());
+    const bool is_stride_1_dilation_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1) && dilation.x() == 1 && dilation.y() == 1);
 
-    const unsigned int num_rows_processed_per_iteration = is_stride_1 ? 2 : 1;
+    const unsigned int num_rows_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
     const unsigned int num_elems_accessed_per_iteration = is_qasymm ? 4 : (8 / input->element_size());
     const unsigned int num_rows_read_per_iteration      = num_rows_processed_per_iteration + 2;
     const unsigned int num_rows_written_per_iteration   = std::ceil(num_rows_processed_per_iteration / static_cast<float>(conv_info.stride().first));
@@ -166,15 +170,17 @@ BorderSize CLDepthwiseConvolutionLayer3x3NHWCKernel::border_size() const
 }
 
 void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
-                                                         unsigned int depth_multiplier, ActivationLayerInfo act_info)
+                                                         unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info));
-    auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info, dilation));
+    auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, dilation);
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
 
-    const bool is_qasymm   = is_data_type_quantized_asymmetric(input->info()->data_type());
-    const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
+    const bool is_qasymm              = is_data_type_quantized_asymmetric(input->info()->data_type());
+    const bool is_stride_1            = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
+    const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1);
+
     const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device());
 
     _input = input;
@@ -182,8 +188,8 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
     _weights                            = weights;
     _biases                             = biases;
     _conv_stride_y                      = conv_info.stride().second;
-    _num_rows_processed_per_iteration   = is_stride_1 ? 2 : 1;
-    _num_planes_processed_per_iteration = is_stride_1 ? 2 : 1;
+    _num_rows_processed_per_iteration   = is_stride_1_dilation_1 ? 2 : 1;
+    _num_planes_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1;
 
     // If QASYMM8 and the 8 bit dot product is available, force _num_planes_processed_per_iteration to 1
     if(is_dot8_supported && is_qasymm)
@@ -201,6 +207,8 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
     build_opts.add_option("-DSRC_DIM_2=" + support::cpp11::to_string(_input->info()->dimension(2)));
     build_opts.add_option("-DCONV_PAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
     build_opts.add_option("-DCONV_PAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
+    build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
+    build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
 
     if(is_qasymm)
     {
@@ -238,7 +246,7 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
         build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
     }
 
-    if(is_stride_1)
+    if(is_stride_1_dilation_1)
     {
         build_opts.add_option("-DNUM_ROWS_PROCESSED=" + support::cpp11::to_string(_num_rows_processed_per_iteration));
         build_opts.add_option("-DNUM_PLANES_PROCESSED=" + support::cpp11::to_string(_num_planes_processed_per_iteration));
@@ -257,14 +265,14 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
     if(is_qasymm)
     {
         kernel_name = std::string("dwc_3x3_reshaped_qasymm8");
-        kernel_name += (is_dot8_supported && is_stride_1 ? "_dot8" : "");
-        kernel_name += (is_stride_1 ? "_stride1" : "");
+        kernel_name += (is_dot8_supported && is_stride_1_dilation_1 ? "_dot8" : "");
+        kernel_name += (is_stride_1_dilation_1 ? "_stride1" : "");
         kernel_name += "_nhwc";
     }
     else
     {
         kernel_name = std::string("depthwise_convolution_3x3_nhwc");
-        kernel_name += (is_stride_1 ? "_stride1" : "");
+        kernel_name += (is_stride_1_dilation_1 ? "_stride1" : "");
     }
 
     ICLKernel::configure_internal(win_config.second);
@@ -287,13 +295,12 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
 }
 
 Status CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
-                                                          unsigned int depth_multiplier,
-                                                          ActivationLayerInfo act_info)
+                                                          unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), biases != nullptr ? biases->clone().get() : nullptr,
-                                                              output->clone().get(), conv_info, depth_multiplier)
+                                                              output->clone().get(), conv_info, depth_multiplier, dilation)
                                 .first);
 
     return Status{};
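The NHWC hunks above feed the dilation into compute_depthwise_convolution_shape(); a minimal sketch (illustrative names and values, not taken from the library) of the per-dimension output-extent arithmetic this is assumed to perform:

#include <cstdio>

// Effective kernel extent and convolved output size for one spatial dimension.
static unsigned int conv_out_dim(unsigned int in, unsigned int kernel, unsigned int stride,
                                 unsigned int pad_begin, unsigned int pad_end, unsigned int dilation)
{
    const unsigned int kernel_eff = kernel + (kernel - 1) * (dilation - 1); // dilation 2 turns a 3-tap kernel into a 5-wide window
    return (in + pad_begin + pad_end - kernel_eff) / stride + 1;            // integer (floor) division
}

int main()
{
    // 3x3 depthwise kernel, unit stride, no padding: dilation 2 shrinks a 112-wide plane to 108.
    printf("w_out=%u\n", conv_out_dim(112, 3, 1, 0, 0, 2));
    return 0;
}
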
diff --git a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
index beff7ae8c4..28d4ff2759 100644
--- a/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
+++ b/src/core/CL/kernels/CLDepthwiseIm2ColKernel.cpp
@@ -44,7 +44,8 @@ CLDepthwiseIm2ColKernel::CLDepthwiseIm2ColKernel()
 
 namespace
 {
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier,
+                          const Size2D &dilation)
 {
     const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
 
@@ -55,16 +56,18 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
     ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(input->data_type()) && has_bias);
     ARM_COMPUTE_RETURN_ERROR_ON((input->dimension(idx_c) * depth_multiplier) != output->dimension(2));
     ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (kernel_dims.width * kernel_dims.height + ((has_bias) ? 1 : 0)));
+    ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || dilation.y() < 1);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
 
     return Status{};
 }
 } // namespace
 
-void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier)
+void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier,
+                                        const Size2D &dilation)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier));
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), kernel_dims, conv_info, has_bias, depth_multiplier, dilation));
 
     _input  = input;
     _output = output;
@@ -88,6 +91,8 @@ void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *outpu
     build_opts.add_option("-DKERNEL_WIDTH=" + support::cpp11::to_string(kernel_dims.width));
     build_opts.add_option("-DKERNEL_HEIGHT=" + support::cpp11::to_string(kernel_dims.height));
     build_opts.add_option("-DDEPTH_MULTIPLIER=" + support::cpp11::to_string(depth_multiplier));
+    build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x()));
+    build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y()));
     build_opts.add_option("-D" + string_from_data_layout(input->info()->data_layout()));
     build_opts.add_option_if(has_bias, "-DHAS_BIAS");
     build_opts.add_option_if_else(is_data_type_quantized_asymmetric(input->info()->data_type()),
@@ -104,9 +109,10 @@ void CLDepthwiseIm2ColKernel::configure(const ICLTensor *input, ICLTensor *outpu
     ICLKernel::configure_internal(win);
 }
 
-Status CLDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier)
+Status CLDepthwiseIm2ColKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias, unsigned int depth_multiplier,
+                                         const Size2D &dilation)
 {
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, kernel_dims, conv_info, has_bias, depth_multiplier, dilation));
 
     return Status{};
 }
-- 
cgit v1.2.1
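Finally, a host-side sketch (illustrative names and parameters, not part of the patch) of the addressing the dilated im2col pass implies: each of the KERNEL_WIDTH x KERNEL_HEIGHT taps is fetched DILATION_X / DILATION_Y pixels apart instead of from adjacent pixels.

#include <cstdio>

int main()
{
    const int kernel_w = 3, kernel_h = 3;
    const int dilation_x = 2, dilation_y = 2;
    const int out_x = 5, out_y = 4, stride_x = 1, stride_y = 1, pad_left = 0, pad_top = 0;

    // Top-left input coordinate of the receptive field for this output position.
    const int base_x = out_x * stride_x - pad_left;
    const int base_y = out_y * stride_y - pad_top;

    for(int ky = 0; ky < kernel_h; ++ky)
    {
        for(int kx = 0; kx < kernel_w; ++kx)
        {
            // Input coordinate gathered for tap (kx, ky); with dilation 1 this is the usual 3x3 neighbourhood.
            printf("tap(%d,%d) -> input(%d,%d)\n", kx, ky, base_x + kx * dilation_x, base_y + ky * dilation_y);
        }
    }
    return 0;
}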