From 6d109965f3641056bb8164dc8450a7327e76e939 Mon Sep 17 00:00:00 2001
From: giuros01
Date: Mon, 7 Jan 2019 17:47:19 +0000
Subject: COMPMID-1691: Optimize CLDepthwiseConvolutionKernel (QASYMM8/NHWC)
 for 3x3 kernels (stride=1 and stride=2)

Change-Id: I7d0d2dc350feeb40d253d17f9ffd5051a8fb42ef
Reviewed-on: https://review.mlplatform.org/511
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 src/core/CL/CLKernelLibrary.cpp                    |   3 +-
 src/core/CL/cl_kernels/depthwise_convolution.cl    | 112 +++++++++-
 .../cl_kernels/depthwise_convolution_quantized.cl  | 192 +++++++++------------
 .../CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp   |  85 +++++----
 ...ConvolutionLayerReshapeWeightsGenericKernel.cpp | 146 ++++++++++++++++
 ...pthwiseConvolutionLayerReshapeWeightsKernel.cpp | 125 ++++++++++++++
 .../CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp | 146 ----------------
 .../CL/functions/CLDepthwiseConvolutionLayer.cpp   |  78 +++++++--
 8 files changed, 570 insertions(+), 317 deletions(-)
 create mode 100644 src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp
 create mode 100644 src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
 delete mode 100644 src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 4635d11a3a..2176c59f94 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -225,9 +225,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map = {
     { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16", "depthwise_convolution.cl" },
     { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32", "depthwise_convolution.cl" },
     { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32", "depthwise_convolution.cl" },
+    { "depthwise_convolution_reshape_weights", "depthwise_convolution.cl" },
+    { "depthwise_convolution_reshape_weights_generic", "depthwise_convolution.cl" },
     { "depthwise_im2col", "depthwise_convolution.cl" },
     { "depthwise_vector_to_tensor", "depthwise_convolution.cl" },
-    { "depthwise_weights_reshape", "depthwise_convolution.cl" },
     { "dequantization_layer", "dequantization_layer.cl" },
     { "derivative", "derivative.cl" },
     { "dilate", "dilate.cl" },
diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl
index bfaa92be10..4f6fdfafee 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -464,6 +464,104 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32(
 
 #endif // defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS)
 
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH)
+/** Reshape the weights for quantized depthwise convolution
+ *
+ * @note Data type should be given as a preprocessor argument using -DDATA_TYPE=type, e.g. -DDATA_TYPE=uint8
+ * @note Output width should be given as a preprocessor argument using -DDST_WIDTH=width, e.g. -DDST_WIDTH=128
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=vec_size, e.g., -DVEC_SIZE=4
+ * @attention Input's height and width should be 3
+ *
+ * @param[in]  src_ptr                           Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr                           Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void depthwise_convolution_reshape_weights(
+    TENSOR3D_DECLARATION(src),
+    IMAGE_DECLARATION(dst))
+{
+    Vector    src = CONVERT_TO_VECTOR_STRUCT(src);
+    const int x   = get_global_id(0);
+
+    // Load 3x3xVEC_SIZE weights
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w0 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 0 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w1 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 0 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w2 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 0 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w3 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 1 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w4 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 1 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w5 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 1 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w6 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 2 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w7 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 2 * src_stride_z);
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    w8 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 2 * src_stride_z);
+
+    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * DST_WIDTH * sizeof(DATA_TYPE);
+
+#if defined(TRANSPOSE)
+#if VEC_SIZE != 4
+#error "VEC_SIZE not supported"
+#else // VEC_SIZE != 4
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w0.s0, w1.s0, w2.s0, w3.s0), 0, dst_addr + 0);
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w4.s0, w5.s0, w6.s0, w7.s0), 0, dst_addr + 1 * sizeof(DATA_TYPE) * VEC_SIZE);
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w8.s0, w0.s1, w1.s1, w2.s1), 0, dst_addr + 2 * sizeof(DATA_TYPE) * VEC_SIZE);
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w3.s1, w4.s1, w5.s1, w6.s1), 0, dst_addr + 3 * sizeof(DATA_TYPE) * VEC_SIZE);
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w7.s1, w8.s1, w0.s2, w1.s2), 0, dst_addr + 4 * sizeof(DATA_TYPE) * VEC_SIZE);
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w2.s2, w3.s2, w4.s2, w5.s2), 0, dst_addr + 5 * sizeof(DATA_TYPE) * VEC_SIZE);
+    VSTORE(VEC_SIZE)
+    ((VEC_DATA_TYPE(DATA_TYPE,
VEC_SIZE))(w6.s2, w7.s2, w8.s2, w0.s3), 0, dst_addr + 6 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w1.s3, w2.s3, w3.s3, w4.s3), 0, dst_addr + 7 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w5.s3, w6.s3, w7.s3, w8.s3), 0, dst_addr + 8 * sizeof(DATA_TYPE) * VEC_SIZE); +#endif // VEC_SIZE != 4 +#else // !defined(TRANSPOSE) + VSTORE(VEC_SIZE) + (w0, 0, dst_addr + 0); + VSTORE(VEC_SIZE) + (w1, 0, dst_addr + 1 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w2, 0, dst_addr + 2 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w3, 0, dst_addr + 3 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w4, 0, dst_addr + 4 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w5, 0, dst_addr + 5 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w6, 0, dst_addr + 6 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w7, 0, dst_addr + 7 * sizeof(DATA_TYPE) * VEC_SIZE); + VSTORE(VEC_SIZE) + (w8, 0, dst_addr + 8 * sizeof(DATA_TYPE) * VEC_SIZE); +#endif // defined(TRANSPOSE) +} +#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH) + #if defined(NCHW) #define in_stride_x src_stride_x #define in_stride_y src_stride_y @@ -504,7 +602,7 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32( * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector */ -__kernel void depthwise_weights_reshape( +__kernel void depthwise_convolution_reshape_weights_generic( TENSOR3D_DECLARATION(src), IMAGE_DECLARATION(dst) #ifdef HAS_BIAS @@ -1091,9 +1189,9 @@ __kernel void depthwise_convolution_3x3_nhwc( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else /* defined(DST_DEPTH) */ +#else // defined(DST_DEPTH) int z = get_global_id(2); // spatial coordinate y -#endif /* defined(DST_DEPTH) */ +#endif // defined(DST_DEPTH) Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); @@ -1240,9 +1338,9 @@ __kernel void depthwise_convolution_3x3_nhwc_stride1( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else /* defined(DST_DEPTH) */ +#else // defined(DST_DEPTH) int z = get_global_id(2); // spatial coordinate y -#endif /* defined(DST_DEPTH) */ +#endif // defined(DST_DEPTH) Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); @@ -1394,4 +1492,4 @@ __kernel void depthwise_convolution_3x3_nhwc_stride1( } #endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) -#endif // defined(VEC_SIZE) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) && defined(DATA_TYPE) \ No newline at end of file +#endif // defined(VEC_SIZE) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) && defined(DATA_TYPE) diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl index 5a732b4863..606af2edb1 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2019 ARM Limited. 
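Before the quantized kernels that consume the reshaped weights, it is worth pinning down the layout the depthwise_convolution_reshape_weights kernel above produces. A host-side sketch of the same mapping follows (plain C++; reshape_block is a hypothetical illustration helper, not part of this patch, and VEC_SIZE == 4 is assumed, as the TRANSPOSE path requires):

#include <cstddef>
#include <cstdint>

// Layout written by depthwise_convolution_reshape_weights for one block of
// 4 channels (VEC_SIZE == 4). w[tap][ch] holds the 3x3 kernel of channel
// `ch`, taps in row-major order (w0..w8 above). `dst` is one row of the
// reshaped 2D weights tensor (36 bytes per 4-channel block).
void reshape_block(const uint8_t w[9][4], uint8_t dst[36], bool transpose)
{
    if(!transpose)
    {
        // w0..w8 stored back-to-back, 4 channels each
        for(size_t tap = 0; tap < 9; ++tap)
            for(size_t ch = 0; ch < 4; ++ch)
                dst[tap * 4 + ch] = w[tap][ch];
    }
    else
    {
        // Channel-major: all 9 taps of channel 0, then channel 1, ...
        // matching the (w0.s0, w1.s0, w2.s0, w3.s0), (w4.s0, ...) stores
        for(size_t ch = 0; ch < 4; ++ch)
            for(size_t tap = 0; tap < 9; ++tap)
                dst[ch * 9 + tap] = w[tap][ch];
    }
}

The transposed form keeps the nine taps of each channel contiguous, which is what lets the dot8 kernel feed ARM_DOT with uchar8 slices from a single linear read.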
* * SPDX-License-Identifier: MIT * @@ -574,62 +574,25 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nchw( #endif /* WEIGHTS_OFFSET != 0 */ #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) -#define DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8) \ - ({ \ - ARM_DOT((uchar4)(val0.s0, val1.s0, val2.s0, val3.s0), (uchar4)(w0.s0, w1.s0, w2.s0, w3.s0), acc.s0); \ - ARM_DOT((uchar4)(val4.s0, val5.s0, val6.s0, val7.s0), (uchar4)(w4.s0, w5.s0, w6.s0, w7.s0), acc.s0); \ - acc.s0 += val8.s0 * w8.s0; \ - \ - ARM_DOT((uchar4)(val0.s1, val1.s1, val2.s1, val3.s1), (uchar4)(w0.s1, w1.s1, w2.s1, w3.s1), acc.s1); \ - ARM_DOT((uchar4)(val4.s1, val5.s1, val6.s1, val7.s1), (uchar4)(w4.s1, w5.s1, w6.s1, w7.s1), acc.s1); \ - acc.s1 += val8.s1 * w8.s1; \ - \ - ARM_DOT((uchar4)(val0.s2, val1.s2, val2.s2, val3.s2), (uchar4)(w0.s2, w1.s2, w2.s2, w3.s2), acc.s2); \ - ARM_DOT((uchar4)(val4.s2, val5.s2, val6.s2, val7.s2), (uchar4)(w4.s2, w5.s2, w6.s2, w7.s2), acc.s2); \ - acc.s2 += val8.s2 * w8.s2; \ - \ - ARM_DOT((uchar4)(val0.s3, val1.s3, val2.s3, val3.s3), (uchar4)(w0.s3, w1.s3, w2.s3, w3.s3), acc.s3); \ - ARM_DOT((uchar4)(val4.s3, val5.s3, val6.s3, val7.s3), (uchar4)(w4.s3, w5.s3, w6.s3, w7.s3), acc.s3); \ - acc.s3 += val8.s3 * w8.s3; \ +#define DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1) \ + ({ \ + ARM_DOT((uchar4)(val0, val1, val2, val3), w0.s0123, acc); \ + ARM_DOT((uchar4)(val4, val5, val6, val7), w0.s4567, acc); \ + acc += val8 * w1; \ }) -#if WEIGHTS_OFFSET != 0 -#define DOT_PRODUCT_ACCUMULATE(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8) \ - ({ \ - ARM_DOT((uchar4)(w0.s0, w1.s0, w2.s0, w3.s0), (uchar4)(val0.s0, val1.s0, val2.s0, val3.s0), acc.s0); \ - ARM_DOT((uchar4)(w4.s0, w5.s0, w6.s0, w7.s0), (uchar4)(val4.s0, val5.s0, val6.s0, val7.s0), acc.s0); \ - ARM_DOT((uchar4)(w8.s0, 0, 0, 0), (uchar4)val8.s0, acc.s0); \ - \ - ARM_DOT((uchar4)(w0.s1, w1.s1, w2.s1, w3.s1), (uchar4)(val0.s1, val1.s1, val2.s1, val3.s1), acc.s1); \ - ARM_DOT((uchar4)(w4.s1, w5.s1, w6.s1, w7.s1), (uchar4)(val4.s1, val5.s1, val6.s1, val7.s1), acc.s1); \ - ARM_DOT((uchar4)(w8.s1, 0, 0, 0), (uchar4)val8.s1, acc.s1); \ - \ - ARM_DOT((uchar4)(w0.s2, w1.s2, w2.s2, w3.s2), (uchar4)(val0.s2, val1.s2, val2.s2, val3.s2), acc.s2); \ - ARM_DOT((uchar4)(w4.s2, w5.s2, w6.s2, w7.s2), (uchar4)(val4.s2, val5.s2, val6.s2, val7.s2), acc.s2); \ - ARM_DOT((uchar4)(w8.s2, 0, 0, 0), (uchar4)val8.s2, acc.s2); \ - \ - ARM_DOT((uchar4)(w0.s3, w1.s3, w2.s3, w3.s3), (uchar4)(val0.s3, val1.s3, val2.s3, val3.s3), acc.s3); \ - ARM_DOT((uchar4)(w4.s3, w5.s3, w6.s3, w7.s3), (uchar4)(val4.s3, val5.s3, val6.s3, val7.s3), acc.s3); \ - ARM_DOT((uchar4)(w8.s3, 0, 0, 0), (uchar4)val8.s3, acc.s3); \ - }) -#else /* WEIGHTS_OFFSET != 0 */ -#define DOT_PRODUCT_ACCUMULATE(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8) DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1, w2, w3, w4, w5, w6, w7, w8) -#endif /* WEIGHTS_OFFSET != 0 */ - #define DOT_PRODUCT_REDUCTION(sum, val0, val1, val2, val3, val4, val5, val6, val7, val8) \ ({ \ - sum = CONVERT(val0, VEC_INT); \ - ARM_DOT((uchar4)(val1.s0, val2.s0, val3.s0, val4.s0), (uchar4)1, sum.s0); \ - ARM_DOT((uchar4)(val5.s0, val6.s0, val7.s0, val8.s0), (uchar4)1, sum.s0); \ - \ - ARM_DOT((uchar4)(val1.s1, val2.s1, val3.s1, val4.s1), (uchar4)1, sum.s1); \ - 
ARM_DOT((uchar4)(val5.s1, val6.s1, val7.s1, val8.s1), (uchar4)1, sum.s1); \ - \ - ARM_DOT((uchar4)(val1.s2, val2.s2, val3.s2, val4.s2), (uchar4)1, sum.s2); \ - ARM_DOT((uchar4)(val5.s2, val6.s2, val7.s2, val8.s2), (uchar4)1, sum.s2); \ - \ - ARM_DOT((uchar4)(val1.s3, val2.s3, val3.s3, val4.s3), (uchar4)1, sum.s3); \ - ARM_DOT((uchar4)(val5.s3, val6.s3, val7.s3, val8.s3), (uchar4)1, sum.s3); \ + sum = val0; \ + ARM_DOT((uchar4)(val1, val2, val3, val4), (uchar4)1, sum); \ + ARM_DOT((uchar4)(val5, val6, val7, val8), (uchar4)1, sum); \ + }) + +#define DOT_PRODUCT_REDUCTION_WEIGHTS(sum, w0, w1) \ + ({ \ + sum = w1; \ + ARM_DOT(w0.s0123, (uchar4)1, sum); \ + ARM_DOT(w0.s4567, (uchar4)1, sum); \ }) #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) @@ -637,6 +600,7 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nchw( #if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) /** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width or height is not 1. * + * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2) * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM_2=112) * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) @@ -664,13 +628,11 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nchw( * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr + * @param[in] weights_ptr Pointer to the weights tensor reshaped. Supported data types: same as @p src_ptr * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) - * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes) * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor * @param[in] biases_ptr (Optional) Pointer to the biases vector. 
Supported data types: same as @p src_ptr * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) @@ -681,7 +643,7 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nchw( __kernel void depthwise_convolution_3x3_quantized_nhwc( TENSOR4D_DECLARATION(src), TENSOR4D_DECLARATION(dst), - TENSOR3D_DECLARATION(weights), + IMAGE_DECLARATION(weights), #if defined(HAS_BIAS) VECTOR_DECLARATION(biases), #endif /* defined(HAS_BIAS) */ @@ -692,11 +654,11 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else /* defined(DST_DEPTH) */ +#else // defined(DST_DEPTH) int z = get_global_id(2); // spatial coordinate y -#endif /* defined(DST_DEPTH) */ +#endif // defined(DST_DEPTH) - Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); + __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; #if defined(DST_DEPTH) __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; @@ -716,19 +678,19 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc( int4 y_offset = convert_int4(y_coord * (int)src_stride_y); - // We compute 4x1x1 [C,W,H] elements + // We compute VEC_SIZEx1x1 [C,W,H] elements VEC_INT acc = 0, sum = 0; // Load weights - VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 2 * weights_stride_z); - VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 2 * weights_stride_z); - VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 2 * weights_stride_z); + VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights_addr + 0); + VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights_addr + VEC_SIZE); + VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights_addr + 2 * VEC_SIZE); + VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights_addr + 3 * VEC_SIZE); + VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights_addr + 4 * VEC_SIZE); + VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights_addr + 5 * VEC_SIZE); + VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights_addr + 6 * VEC_SIZE); + VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights_addr + 7 * VEC_SIZE); + VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights_addr + 8 * VEC_SIZE); #if INPUT_OFFSET != 0 VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) @@ -824,8 +786,9 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc( #endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) #if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) -/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1 +/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1. * + * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. 
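With the weights reshaped, the loads above become nine consecutive VEC_SIZE-wide chunks from one row of the reshaped tensor, replacing the old strided y/z walks through a 3D weights tensor. The address arithmetic reduces to the following sketch (plain C++; tap_ptr is a hypothetical helper for illustration only):

#include <cstddef>
#include <cstdint>

// Sketch of the new weights addressing in the quantized NHWC kernels:
// one reshaped row per block of VEC_SIZE channels, nine consecutive taps.
const uint8_t *tap_ptr(const uint8_t *weights_ptr, size_t offset_first_element,
                       size_t stride_y, size_t x /* channel-block id */,
                       size_t tap /* 0..8 */, size_t vec_size)
{
    const uint8_t *row = weights_ptr + offset_first_element + x * stride_y;
    return row + tap * vec_size; // w0..w8 live at weights_addr + k * VEC_SIZE
}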
* @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2) * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM_2=112) * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (i.e. -DNUM_ROWS_PROCESSED=2) @@ -858,8 +821,6 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc( * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) - * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes) * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as @p src_ptr * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) @@ -871,7 +832,7 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc( __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1( TENSOR4D_DECLARATION(src), TENSOR4D_DECLARATION(dst), - TENSOR3D_DECLARATION(weights), + IMAGE_DECLARATION(weights), #if defined(HAS_BIAS) VECTOR_DECLARATION(biases), #endif /* defined(HAS_BIAS) */ @@ -882,11 +843,11 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else /* defined(DST_DEPTH) */ +#else // defined(DST_DEPTH) int z = get_global_id(2); // spatial coordinate y -#endif /* defined(DST_DEPTH) */ +#endif // defined(DST_DEPTH) - Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); + __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; #if defined(DST_DEPTH) __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; @@ -913,15 +874,15 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1( VEC_INT acc3 = 0, sum3 = 0; // Load weights - VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 2 * weights_stride_z); - VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 2 * weights_stride_z); - VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 2 * weights_stride_z); + VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights_addr + 0); + VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights_addr + VEC_SIZE); + VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights_addr + 2 * VEC_SIZE); + VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights_addr + 3 * VEC_SIZE); + 
VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights_addr + 4 * VEC_SIZE); + VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights_addr + 5 * VEC_SIZE); + VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights_addr + 6 * VEC_SIZE); + VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights_addr + 7 * VEC_SIZE); + VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights_addr + 8 * VEC_SIZE); #if INPUT_OFFSET != 0 VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) @@ -1103,9 +1064,11 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1( } } -#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) -/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1 using dot product +#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE == 4 +/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1 using dot product. * + * @note This kernel assumes VEC_SIZE is 4. + * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2) * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM_2=112) * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (i.e. -DNUM_ROWS_PROCESSED=2) @@ -1140,8 +1103,6 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1( * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) - * @param[in] weights_step_z weights_stride_z * number of elements along Y processed per workitem(in bytes) * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor * @param[in] biases_ptr (Optional) Pointer to the biases vector. 
Supported data types: QASYMM8 * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) @@ -1149,11 +1110,10 @@ __kernel void depthwise_convolution_3x3_quantized_nhwc_stride1( * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector * @param[in] max_offset The maximum allowed offset for the input tensor */ - __kernel void depthwise_convolution_3x3_quantized_dot8_nhwc_stride1( TENSOR4D_DECLARATION(src), TENSOR4D_DECLARATION(dst), - TENSOR3D_DECLARATION(weights), + IMAGE_DECLARATION(weights), #if defined(HAS_BIAS) VECTOR_DECLARATION(biases), #endif // defined(HAS_BIAS) @@ -1164,11 +1124,11 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nhwc_stride1( #if defined(DST_DEPTH) int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else /* defined(DST_DEPTH) */ +#else // defined(DST_DEPTH) int z = get_global_id(2); // spatial coordinate y -#endif /* defined(DST_DEPTH) */ +#endif // defined(DST_DEPTH) - Vector weights = CONVERT_TO_VECTOR_STRUCT(weights); + __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; #if defined(DST_DEPTH) __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; @@ -1195,19 +1155,16 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nhwc_stride1( VEC_INT sum1 = 0; // Load weights - VEC_UCHAR w0 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w1 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w2 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 0 * weights_stride_z); - VEC_UCHAR w3 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w4 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w5 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 1 * weights_stride_z); - VEC_UCHAR w6 = VLOAD(VEC_SIZE)(0, weights.ptr + 0 * weights_stride_y + 2 * weights_stride_z); - VEC_UCHAR w7 = VLOAD(VEC_SIZE)(0, weights.ptr + 1 * weights_stride_y + 2 * weights_stride_z); - VEC_UCHAR w8 = VLOAD(VEC_SIZE)(0, weights.ptr + 2 * weights_stride_y + 2 * weights_stride_z); + uchar16 w0 = VLOAD(16)(0, weights_addr); + uchar16 w1 = VLOAD(16)(0, weights_addr + 16); + uchar4 w2 = VLOAD(4)(0, weights_addr + 32); #if INPUT_OFFSET != 0 // Initilize the final result with the weights reduction multiplied by INPUT_OFFSET - DOT_PRODUCT_REDUCTION(acc0, w0, w1, w2, w3, w4, w5, w6, w7, w8); + DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s0, w0.s01234567, w0.s8); + DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s1, (uchar8)((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); + DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s2, w1.s23456789, w1.sA); + DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s3, (uchar8)((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); // Multiply the weights reduction with INPUT_OFFSET acc0 = INPUT_OFFSET * acc0; @@ -1250,11 +1207,25 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nhwc_stride1( VEC_UCHAR values10 = VLOAD(VEC_SIZE)(0, src_addr + offset.s2); VEC_UCHAR values11 = VLOAD(VEC_SIZE)(0, src_addr + offset.s3); - DOT_PRODUCT_REDUCTION(sum0, values0, values1, values2, values4, values5, values6, values8, values9, values10); - DOT_PRODUCT_ACCUMULATE(acc0, values0, values1, values2, values4, values5, values6, values8, values9, values10, w0, w1, w2, w3, w4, w5, w6, w7, w8); + 
DOT_PRODUCT_REDUCTION(sum0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0); + DOT_PRODUCT_REDUCTION(sum1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0); + DOT_PRODUCT(acc0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0, w0.s01234567, w0.s8); + DOT_PRODUCT(acc1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0, w0.s01234567, w0.s8); - DOT_PRODUCT_REDUCTION(sum1, values1, values2, values3, values5, values6, values7, values9, values10, values11); - DOT_PRODUCT_ACCUMULATE(acc1, values1, values2, values3, values5, values6, values7, values9, values10, values11, w0, w1, w2, w3, w4, w5, w6, w7, w8); + DOT_PRODUCT_REDUCTION(sum0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1); + DOT_PRODUCT_REDUCTION(sum1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1); + DOT_PRODUCT(acc0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1, (uchar8)((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); + DOT_PRODUCT(acc1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1, (uchar8)((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); + + DOT_PRODUCT_REDUCTION(sum0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2); + DOT_PRODUCT_REDUCTION(sum1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2); + DOT_PRODUCT(acc0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2, w1.s23456789, w1.sA); + DOT_PRODUCT(acc1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2, w1.s23456789, w1.sA); + + DOT_PRODUCT_REDUCTION(sum0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3); + DOT_PRODUCT_REDUCTION(sum1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3); + DOT_PRODUCT(acc0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3, (uchar8)((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); + DOT_PRODUCT(acc1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3, (uchar8)((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); #if defined(HAS_BIAS) Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); @@ -1308,8 +1279,7 @@ __kernel void depthwise_convolution_3x3_quantized_dot8_nhwc_stride1( VSTORE(VEC_SIZE) (ACTIVATION_FUNC(res1), 0, dst_addr + 1 * dst_stride_y); } - -#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) +#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE==4 #endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp index 594d0b6981..5e5a35c14c 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp +++ 
b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp
@@ -37,9 +37,8 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 
-using namespace arm_compute;
-using namespace arm_compute::misc::shape_calculator;
-
+namespace arm_compute
+{
 namespace
 {
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier,
@@ -54,11 +53,24 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
                                     "For QASYMM8 only logistic, relu, lower bounded relu and lower-upper bounded relu are supported"); //COMPMID-1317 add fused activation for F32
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
     ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1); // COMPMID-1071 Add depth multiplier support for NHWC
-    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(1) != 3 || weights->dimension(2) != 3);
-    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1 || conv_info.stride().first > 2);
+
+    ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1);
     ARM_COMPUTE_RETURN_ERROR_ON(std::max(conv_info.pad_top(), conv_info.pad_bottom()) > 1);
 
-    const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type());
+    const bool   is_qasymm      = is_data_type_quantized_asymmetric(input->data_type());
+    const size_t weights_width  = 3;
+    const size_t weights_height = 3;
+
+    if(is_qasymm)
+    {
+        DepthwiseConvolutionReshapeInfo info;
+        info.c0 = 4;
+        ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(0) / info.c0) != weights_width * weights_height);
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(1) != weights_width) || (weights->dimension(2) != weights_height));
+    }
 
     if(biases != nullptr)
     {
@@ -68,15 +80,16 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
         }
         else
         {
+            ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(0));
             ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases);
         }
 
-        ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != weights->dimension(0));
+
         ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
     }
 
     if(output->total_size() != 0)
     {
-        const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, depth_multiplier);
+        const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, weights_width, weights_height, conv_info, depth_multiplier);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
     }
 
@@ -84,10 +97,13 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
 }
 
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *output,
-                                                        const PadStrideInfo &conv_info)
+                                                        const PadStrideInfo &conv_info, unsigned int depth_multiplier)
 {
+    const size_t weights_width  = 3;
+    const size_t weights_height = 3;
+
     // Get convolved dimensions
-    const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, conv_info, 1 /* depth_multiplier */);
+    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape(*input, weights_width, weights_height, conv_info, depth_multiplier);
 
     // Output auto initialization if not yet initialized
     auto_init_if_empty(*output,
@@ -113,9 +129,18 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
     AccessWindowStatic input_access(input, 0, -border_size.top, ceil_to_multiple(input->dimension(0), num_elems_accessed_per_iteration),
                                     ceil_to_multiple(input->dimension(1) + border_size.bottom, num_rows_read_per_iteration));
     AccessWindowRectangle output_access(output, 0, 0, num_elems_accessed_per_iteration, num_rows_written_per_iteration);
-    AccessWindowStatic weights_access(weights, 0, 0, ceil_to_multiple(weights->dimension(0), num_elems_accessed_per_iteration), weights->dimension(1));
 
-    bool window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
+    bool window_changed = false;
+
+    if(is_qasymm)
+    {
+        window_changed = update_window_and_padding(win, input_access, output_access);
+    }
+    else
+    {
+        AccessWindowStatic weights_access(weights, 0, 0, ceil_to_multiple(weights->dimension(0), num_elems_accessed_per_iteration), weights->dimension(1));
+        window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
+    }
 
     if(bias != nullptr)
     {
@@ -144,18 +169,9 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
                                                          unsigned int        depth_multiplier, ActivationLayerInfo act_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
-
-    // Get convolved dimensions
-    const TensorShape output_shape = compute_depthwise_convolution_shape(*input->info(), *weights->info(), conv_info, depth_multiplier);
-
-    // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(),
-                       output_shape,
-                       1,
-                       input->info()->data_type(),
-                       input->info()->quantization_info());
-
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier, act_info));
+    auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info, depth_multiplier);
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
 
     const bool is_qasymm   = is_data_type_quantized_asymmetric(input->info()->data_type());
     const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
@@ -250,13 +266,8 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input,
 
     // Create kernel
     std::string kernel_name = std::string("depthwise_convolution_3x3") + (is_qasymm ? std::string("_quantized") + ((is_dot8_supported && is_stride_1) ? "_dot8" : "") : "") + "_nhwc" + (is_stride_1 ? "_stride1" : "");
-
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
-
-    // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), conv_info);
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     ICLKernel::configure_internal(win_config.second);
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
 
     // Set config_id for enabling LWS tuning
     _config_id = kernel_name;
@@ -281,7 +292,7 @@ Status CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(const ITensorInfo *inp
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info));
     ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), biases != nullptr ?
biases->clone().get() : nullptr, - output->clone().get(), conv_info) + output->clone().get(), conv_info, depth_multiplier) .first); return Status{}; @@ -295,6 +306,7 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::Com // Collapse window Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); const size_t total_batches = _input->info()->tensor_shape().total_size_upper(3); + const bool is_qasymm = is_data_type_quantized_asymmetric(_input->info()->data_type()); Window win = window_collapsed; win.set(Window::DimZ, Window::Dimension(0, std::ceil(_output->info()->dimension(2) / static_cast(_num_planes_processed_per_iteration)) * total_batches, 1)); @@ -309,7 +321,7 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::Com Window slice_in = win_in.first_slice_window_4D(); Window slice_out = win.first_slice_window_4D(); - unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_3D_tensor(); + unsigned int idx = 2 * num_arguments_per_4D_tensor() + (is_qasymm ? num_arguments_per_2D_tensor() : num_arguments_per_3D_tensor()); if(_biases != nullptr) { @@ -328,9 +340,16 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::Com unsigned int idx = 0; add_4D_tensor_argument(idx, _input, slice_in); add_4D_tensor_argument(idx, _output, slice_out); - add_3D_tensor_argument(idx, _weights, slice_out); - + if(is_qasymm) + { + add_2D_tensor_argument(idx, _weights, slice_out); + } + else + { + add_3D_tensor_argument(idx, _weights, slice_out); + } enqueue(queue, *this, slice_out, lws_hint()); } while(win.slide_window_slice_4D(slice_out) && win_in.slide_window_slice_4D(slice_in)); } +} // namespace arm_compute diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp new file mode 100644 index 0000000000..4432ce5605 --- /dev/null +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.cpp @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2017-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
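A note on the shape this new generic reshape kernel produces: per the validate_arguments() checks further down in the file, row y of the output holds channel y, and each row is kernel_w * kernel_h elements wide, plus one trailing element per channel when biases are folded in. A sketch of that rule (plain C++; generic_reshaped_shape is a hypothetical helper, not part of this patch):

#include <cstddef>
#include <utility>

// Reshaped weights shape for the generic kernel, per the checks below:
// dst.x = kernel_w * kernel_h (+1 if a bias value is appended per channel)
// dst.y = number of channels
std::pair<size_t, size_t> generic_reshaped_shape(size_t kernel_w, size_t kernel_h,
                                                 size_t channels, bool has_bias)
{
    return { kernel_w * kernel_h + (has_bias ? 1u : 0u), channels };
}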
+ */ +#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/CL/CLValidate.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/CL/OpenCL.h" +#include "arm_compute/core/Error.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/Types.h" +#include "support/ToolchainSupport.h" + +using namespace arm_compute; + +namespace +{ +Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases) +{ + const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); + const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); + const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); + + ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(input->data_type()) && (biases != nullptr)); + ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_c) != output->dimension(1)); + ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (input->dimension(idx_w) * input->dimension(idx_h) + ((biases != nullptr) ? 1 : 0))); + + if(biases != nullptr) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases); + ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != input->dimension(idx_c)); + ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); + } + + return Status{}; +} +} // namespace + +CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel() + : _input(nullptr), _biases(nullptr), _output(nullptr) +{ +} + +void CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *biases) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), (biases != nullptr) ? 
biases->info() : nullptr));
+
+    _input  = input;
+    _biases = biases;
+    _output = output;
+
+    const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
+
+    // Create kernel
+    std::set<std::string> build_opts;
+
+    build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+    build_opts.emplace("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_w)));
+    build_opts.emplace("-D" + string_from_data_layout(input->info()->data_layout()));
+    if(_biases != nullptr)
+    {
+        build_opts.emplace("-DHAS_BIAS");
+    }
+
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_convolution_reshape_weights_generic", build_opts));
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input->info(), Steps());
+    // The CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel doesn't need padding so update_window_and_padding() can be skipped
+    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+    ICLKernel::configure_internal(win);
+}
+
+Status CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, biases));
+    return Status{};
+}
+
+void CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
+
+    Window slice     = window.first_slice_window_3D();
+    Window slice_out = window.first_slice_window_2D();
+
+    const size_t idx_w = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+    const size_t idx_c = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::CHANNEL);
+
+    // Setup slice
+    slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(idx_w), _input->info()->dimension(idx_w)));
+    slice.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(idx_h), 1));
+    slice.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(idx_c), 1));
+
+    // Setup output slice
+    // The first two dimensions of the output are increased by the inner loops
+    slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
+    slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+    // Set biases
+    if(_biases != nullptr)
+    {
+        unsigned int idx = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
+        Window       slice_biases;
+        slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
+        add_1D_tensor_argument(idx, _biases, slice_biases);
+    }
+
+    do
+    {
+        unsigned int idx = 0;
+        add_3D_tensor_argument(idx, _input, slice);
+        add_2D_tensor_argument(idx, _output, slice_out);
+        enqueue(queue, *this, slice);
+    }
+    while(window.slide_window_slice_3D(slice) && window.slide_window_slice_2D(slice_out));
+}
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
new file mode 100644
index 0000000000..608181dad1
--- /dev/null
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
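For the 3x3 NHWC reshape kernel this new file implements, the patch relies on compute_reshaped_depthwise_weights_shape(), whose body is not shown in this diff. Judging from the (weights->dimension(0) / info.c0 == 3 * 3) check in CLDepthwiseConvolutionLayer3x3NHWCKernel above, each output row appears to pack the nine taps of a block of c0 channels; the sketch below encodes that assumption (plain C++; reshaped_3x3_shape is a hypothetical helper):

#include <cstddef>
#include <utility>

// Assumed shape produced by compute_reshaped_depthwise_weights_shape() for
// the 3x3 NHWC path, inferred from the (weights.dim(0) / c0 == 9) check:
// one row per block of c0 channels, nine c0-wide taps per row.
std::pair<size_t, size_t> reshaped_3x3_shape(size_t channels, size_t c0 /* == 4 */)
{
    const size_t width = 3 * 3 * c0;               // nine taps, c0 channels each
    const size_t rows  = (channels + c0 - 1) / c0; // ceil(channels / c0)
    return { width, rows };
}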
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
+{
+    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC);
+    ARM_COMPUTE_RETURN_ERROR_ON(info.c0 != 4);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_h) != 3);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_w) != 3);
+
+    if(output->total_size() != 0)
+    {
+        auto reshaped_weights_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*input, info);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), reshaped_weights_shape);
+    }
+
+    return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
+{
+    auto reshaped_input_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*input, info);
+    auto_init_if_empty(*output, reshaped_input_shape, 1, input->data_type(), input->quantization_info());
+
+    Window                 win = calculate_max_window(*input, Steps(info.c0));
+    AccessWindowHorizontal weights_access(input, 0, info.c0);
+    const bool             window_changed = update_window_and_padding(win, weights_access);
+
+    output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+    return std::make_pair(err, win);
+}
+} // namespace
+
+CLDepthwiseConvolutionLayerReshapeWeightsKernel::CLDepthwiseConvolutionLayerReshapeWeightsKernel()
+    : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), info));
+    auto win_config = validate_and_configure_window(input->info(), output->info(), info);
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+
+    ICLKernel::configure_internal(win_config.second);
+
+    _input  = input;
+    _output = output;
+
+    // Build the kernel
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(info.c0));
+    build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(0)));
+    build_opts.add_option_if(info.transpose, "-DTRANSPOSE");
+
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_convolution_reshape_weights", build_opts.options()));
+}
+
+Status CLDepthwiseConvolutionLayerReshapeWeightsKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), info).first);
+    return Status{};
+}
+
+void CLDepthwiseConvolutionLayerReshapeWeightsKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+    unsigned int idx = 0;
+    add_3D_tensor_argument(idx, _input, window);
+    add_2D_tensor_argument(idx, _output, window);
+    enqueue(queue, *this, window);
+}
+} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp b/src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp
deleted file mode 100644
index 683dda8d67..0000000000
--- a/src/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/core/CL/kernels/CLDepthwiseWeightsReshapeKernel.h" - -#include "arm_compute/core/CL/CLHelpers.h" -#include "arm_compute/core/CL/CLKernelLibrary.h" -#include "arm_compute/core/CL/CLValidate.h" -#include "arm_compute/core/CL/ICLTensor.h" -#include "arm_compute/core/CL/OpenCL.h" -#include "arm_compute/core/Error.h" -#include "arm_compute/core/Helpers.h" -#include "arm_compute/core/Types.h" -#include "support/ToolchainSupport.h" - -using namespace arm_compute; - -namespace -{ -Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases) -{ - const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH); - const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT); - const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL); - - ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32); - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); - ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(input->data_type()) && (biases != nullptr)); - ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_c) != output->dimension(1)); - ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(0) != (input->dimension(idx_w) * input->dimension(idx_h) + ((biases != nullptr) ? 1 : 0))); - - if(biases != nullptr) - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, biases); - ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != input->dimension(idx_c)); - ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); - } - - return Status{}; -} -} // namespace - -CLDepthwiseWeightsReshapeKernel::CLDepthwiseWeightsReshapeKernel() - : _input(nullptr), _biases(nullptr), _output(nullptr) -{ -} - -void CLDepthwiseWeightsReshapeKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *biases) -{ - ARM_COMPUTE_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), (biases != nullptr) ? 
-
-CLDepthwiseWeightsReshapeKernel::CLDepthwiseWeightsReshapeKernel()
-    : _input(nullptr), _biases(nullptr), _output(nullptr)
-{
-}
-
-void CLDepthwiseWeightsReshapeKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *biases)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), (biases != nullptr) ? biases->info() : nullptr));
-
-    _input  = input;
-    _biases = biases;
-    _output = output;
-
-    const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
-
-    // Create kernel
-    std::set<std::string> build_opts;
-
-    build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
-    build_opts.emplace("-DSRC_WIDTH=" + support::cpp11::to_string(input->info()->dimension(idx_w)));
-    build_opts.emplace("-D" + string_from_data_layout(input->info()->data_layout()));
-    if(_biases != nullptr)
-    {
-        build_opts.emplace("-DHAS_BIAS");
-    }
-
-    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("depthwise_weights_reshape", build_opts));
-
-    // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps());
-    // The CLDepthwiseWeightsReshapeKernel doesn't need padding so update_window_and_padding() can be skipped
-    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
-
-    ICLKernel::configure_internal(win);
-}
-
-Status CLDepthwiseWeightsReshapeKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *biases)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, biases));
-    return Status{};
-}
-
-void CLDepthwiseWeightsReshapeKernel::run(const Window &window, cl::CommandQueue &queue)
-{
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
-
-    Window slice     = window.first_slice_window_3D();
-    Window slice_out = window.first_slice_window_2D();
-
-    const size_t idx_w = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::WIDTH);
-    const size_t idx_h = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::HEIGHT);
-    const size_t idx_c = get_data_layout_dimension_index(_input->info()->data_layout(), DataLayoutDimension::CHANNEL);
-
-    // Setup slice
-    slice.set(Window::DimX, Window::Dimension(0, _input->info()->dimension(idx_w), _input->info()->dimension(idx_w)));
-    slice.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(idx_h), 1));
-    slice.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(idx_c), 1));
-
-    // Setup output slice
-    // The first two dimensions of the output are increased by the inner loops
-    slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
-    slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
-
-    // Set biases
-    if(_biases != nullptr)
-    {
-        unsigned int idx = num_arguments_per_3D_tensor() + num_arguments_per_2D_tensor();
-        Window slice_biases;
-        slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
-        add_1D_tensor_argument(idx, _biases, slice_biases);
-    }
-
-    do
-    {
-        unsigned int idx = 0;
-        add_3D_tensor_argument(idx, _input, slice);
-        add_2D_tensor_argument(idx, _output, slice_out);
-        enqueue(queue, *this, slice);
-    }
-    while(window.slide_window_slice_3D(slice) && window.slide_window_slice_2D(slice_out));
-}
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index be13f500ea..15cbfcedfb 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,13 +33,14 @@
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "support/ToolchainSupport.h"
 
-using namespace arm_compute;
+namespace arm_compute
+{
 using namespace arm_compute::misc;
 using namespace arm_compute::misc::shape_calculator;
 
 CLDepthwiseConvolutionLayer3x3::CLDepthwiseConvolutionLayer3x3(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _kernel(nullptr), _border_handler(), _permute_input_to_nchw(), _permute_weights_to_nchw(), _permute_output_to_nhwc(), _permuted_input(),
-      _permuted_weights(), _permuted_output(), _original_weights(nullptr), _needs_permute(false), _is_prepared(false)
+    : _memory_group(std::move(memory_manager)), _kernel(nullptr), _border_handler(), _permute_input_to_nchw(), _permute_weights_to_nchw(), _permute_output_to_nhwc(), _reshape_weights(), _permuted_input(),
+      _permuted_weights(), _permuted_output(), _original_weights(nullptr), _needs_permute(false), _needs_weights_reshape(false), _is_prepared(false)
 {
 }
 
@@ -51,7 +52,9 @@ void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor
 
     const bool is_nhwc = input->info()->data_layout() == DataLayout::NHWC;
 
-    _needs_permute = is_nhwc && (depth_multiplier > 1);
+    _needs_permute         = is_nhwc && (depth_multiplier > 1);
+    _needs_weights_reshape = is_nhwc && (depth_multiplier == 1)
+                             && is_data_type_quantized_asymmetric(input->info()->data_type());
 
     _is_prepared      = false;
    _original_weights = weights;
@@ -59,6 +62,12 @@ void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor
     const ICLTensor *weights_to_use = weights;
     ICLTensor       *output_to_use  = output;
 
+    const bool is_stride_1       = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
+    const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device());
+    DepthwiseConvolutionReshapeInfo info;
+    info.c0        = 4;
+    info.transpose = is_stride_1 && is_dot8_supported;
+
     if(_needs_permute)
     {
         _memory_group.manage(&_permuted_input);
@@ -80,6 +89,11 @@ void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor
     }
     else if(is_nhwc)
     {
+        if(_needs_weights_reshape)
+        {
+            _reshape_weights.configure(weights, &_permuted_weights, info);
+            weights_to_use = &_permuted_weights;
+        }
         _kernel = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3NHWCKernel>();
     }
     else
@@ -102,7 +116,6 @@ void CLDepthwiseConvolutionLayer3x3::configure(ICLTensor *input, const ICLTensor
         _permuted_input.allocator()->allocate();
         _permuted_output.allocator()->allocate();
     }
-    // Configure border handler
     PixelValue &&zero_value(0.f);
     if(is_data_type_quantized_asymmetric(input->info()->data_type()))
     {
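The transposed layout only pays off when the device can consume it with Arm's 8-bit integer dot-product instructions, which is what the dot8_supported() gate above is for. A hedged sketch of an equivalent raw OpenCL query, assuming the relevant extension string is cl_arm_integer_dot_product_int8 (the library's own helper lives in CLHelpers and may check more than this):

    #include <CL/cl2.hpp>
    #include <string>

    // Sketch: does the device advertise the Arm 8-bit integer dot product
    // extension that the transposed weights path targets?
    bool supports_arm_dot8(const cl::Device &device)
    {
        const std::string extensions = device.getInfo<CL_DEVICE_EXTENSIONS>();
        return extensions.find("cl_arm_integer_dot_product_int8") != std::string::npos;
    }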
@@ -119,8 +132,14 @@ Status CLDepthwiseConvolutionLayer3x3::validate(const ITensorInfo *input, const
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_layout() == DataLayout::UNKNOWN);
 
-    const bool is_nhwc        = input->data_layout() == DataLayout::NHWC;
-    const bool needs_permute  = is_nhwc && (depth_multiplier > 1);
+    const bool is_nhwc               = input->data_layout() == DataLayout::NHWC;
+    const bool needs_permute         = is_nhwc && (depth_multiplier > 1);
+    const bool needs_weights_reshape = is_nhwc && (depth_multiplier == 1);
+    const bool is_stride_1           = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1));
+    const bool is_dot8_supported     = dot8_supported(CLKernelLibrary::get().get_device());
+    DepthwiseConvolutionReshapeInfo info;
+    info.c0        = 4;
+    info.transpose = is_stride_1 && is_dot8_supported;
 
     if(needs_permute)
     {
@@ -140,6 +159,12 @@ Status CLDepthwiseConvolutionLayer3x3::validate(const ITensorInfo *input, const
     }
     else if(is_nhwc)
     {
+        if(needs_weights_reshape)
+        {
+            auto reshaped_weights_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*weights, info);
+            ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, &weights->clone()->set_tensor_shape(reshaped_weights_shape), biases, output, conv_info, depth_multiplier,
+                                                                                           act_info));
+        }
         ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info));
     }
     else
@@ -183,6 +208,15 @@ void CLDepthwiseConvolutionLayer3x3::prepare()
             _permute_weights_to_nchw.run();
             _original_weights->mark_as_unused();
         }
+
+        if(_needs_weights_reshape)
+        {
+            ARM_COMPUTE_ERROR_ON(_needs_permute);
+            ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());
+            _permuted_weights.allocator()->allocate();
+            CLScheduler::get().enqueue(_reshape_weights);
+            _original_weights->mark_as_unused();
+        }
         _is_prepared = true;
     }
 }
@@ -201,11 +235,11 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
 
-    const Status can_run_optimised_3x3_kernel = CLDepthwiseConvolutionLayer3x3::validate(input->info(),
-                                                                                         weights->info(),
-                                                                                         biases != nullptr ? biases->info() : nullptr,
-                                                                                         output->info(),
-                                                                                         conv_info, depth_multiplier, act_info);
+    const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
+
+    const bool can_run_optimised_3x3_kernel = (weights->info()->dimension(idx_w) == 3) && (weights->info()->dimension(idx_h) == 3);
+
     if(bool(can_run_optimised_3x3_kernel))
     {
         auto f = arm_compute::support::cpp14::make_unique<CLDepthwiseConvolutionLayer3x3>();
@@ -214,8 +248,6 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
     }
     else
     {
-        const size_t idx_w = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
-        const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
         const size_t idx_c = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);
 
         const size_t weights_w = weights->info()->dimension(idx_w);
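With the reshape folded into prepare(), the weights transform runs once per function lifetime instead of once per inference. A minimal end-to-end sketch of driving the new fast path (hypothetical shapes and quantization parameters; in this codebase run() invokes prepare() on first use, so no explicit call is needed):

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Hypothetical QASYMM8/NHWC problem: 32 channels, 56x56 spatial, 3x3
        // filter, stride 1 with padding 1. In NHWC the shape order is (C, W, H).
        CLTensor input, weights, biases, output;

        TensorInfo in_info(TensorShape(32U, 56U, 56U), 1, DataType::QASYMM8);
        in_info.set_data_layout(DataLayout::NHWC);
        in_info.set_quantization_info(QuantizationInfo(0.05f, 128));

        TensorInfo w_info(TensorShape(32U, 3U, 3U), 1, DataType::QASYMM8);
        w_info.set_data_layout(DataLayout::NHWC);
        w_info.set_quantization_info(QuantizationInfo(0.01f, 120));

        input.allocator()->init(in_info);
        weights.allocator()->init(w_info);
        biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::S32));
        output.allocator()->init(in_info); // stride 1 / pad 1 keeps the input shape

        CLDepthwiseConvolutionLayer3x3 dwc;
        dwc.configure(&input, &weights, &biases, &output, PadStrideInfo(1, 1, 1, 1)); // depth_multiplier defaults to 1

        input.allocator()->allocate();
        weights.allocator()->allocate();
        biases.allocator()->allocate();
        output.allocator()->allocate();

        // First run triggers prepare(): the reshape kernel is enqueued once and
        // the original weights are marked unused; later runs reuse the copy.
        dwc.run();
        CLScheduler::get().sync();
        return 0;
    }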
@@ -315,10 +347,13 @@ void CLDepthwiseConvolutionLayer::configure(ICLTensor *input, const ICLTensor *w
 Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                              unsigned int depth_multiplier, const ActivationLayerInfo &act_info)
 {
-    if(!bool(CLDepthwiseConvolutionLayer3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info)))
+    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
+    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
+
+    const bool can_run_optimised_3x3_kernel = (weights->dimension(idx_w) == 3) && (weights->dimension(idx_h) == 3);
+
+    if(!can_run_optimised_3x3_kernel)
     {
-        const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
-        const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
         const size_t idx_c = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL);
 
         ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
@@ -344,7 +379,7 @@ Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe
 
         const TensorShape shape_weights_reshape(patch_size, weights_z);
         TensorInfo        weights_reshaped(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_weights_reshape));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseWeightsReshapeKernel::validate(weights, &weights_reshaped, append_bias ? biases : nullptr));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthwiseConvolutionLayerReshapeWeightsGenericKernel::validate(weights, &weights_reshaped, append_bias ? biases : nullptr));
 
         DataType    v2mm_dt        = (input->data_type() == DataType::QASYMM8) ? DataType::S32 : input->data_type();
         TensorShape shape_v2mm_out = input->tensor_shape();
@@ -368,6 +403,10 @@ Status CLDepthwiseConvolutionLayer::validate(const ITensorInfo *input, const ITe
             ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayer::validate(output, nullptr, act_info));
         }
     }
+    else
+    {
+        CLDepthwiseConvolutionLayer3x3::validate(input, weights, biases, output, conv_info, depth_multiplier, act_info);
+    }
 
     return Status{};
 }
@@ -419,3 +458,4 @@ void CLDepthwiseConvolutionLayer::prepare()
         }
     }
 }
+} // namespace arm_compute
--
cgit v1.2.1