From dcf4c87cf78a5f1667699c1a3511d09356938660 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Fri, 16 Apr 2021 12:41:45 +0100
Subject: CLDepthwiseConvolutionLayer rework - Part 1

Remove the reshaped variant for CLDepthwiseConvolutionLayer 3x3 NHWC Quantized
- Remove kernel selection by GPUTarget
- Remove unused quantized support from the NHWC kernel
- Remove CLDepthwiseConvolutionLayerReshapeWeightsKernel
- Remove OpenCL kernels for reshaped dwc 3x3 quantized and weights reshape
- Remove the "_bifrost" suffix in common OpenCL kernels
- Remove the ICLDepthwiseConvolutionLayer3x3Kernel common interface

Resolve COMPMID-3864, COMPMID-3907

Change-Id: Icfac0fb6c00e214985beb05dad7c0cdbbee7d830
Signed-off-by: Giorgio Arena
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5447
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 src/core/CL/CLKernelLibrary.cpp                    |  12 +-
 src/core/CL/CLKernels.h                            |   2 -
 src/core/CL/cl_kernels/depthwise_convolution.cl    | 420 ++++------
 .../cl_kernels/depthwise_convolution_quantized.cl  | 847 +--------------------
 .../CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp   |  53 +-
 .../CLDepthwiseConvolutionLayer3x3NCHWKernel.h     |  31 +-
 .../CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp   | 298 +-------
 .../CLDepthwiseConvolutionLayer3x3NHWCKernel.h     | 103 ++-
 ...pthwiseConvolutionLayerReshapeWeightsKernel.cpp | 131 ----
 ...DepthwiseConvolutionLayerReshapeWeightsKernel.h |  85 ---
 src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp   |   2 -
 .../ICLDepthwiseConvolutionLayer3x3Kernel.h        | 105 ---
 12 files changed, 308 insertions(+), 1781 deletions(-)
 delete mode 100644 src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
 delete mode 100644 src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
 delete mode 100644 src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
(limited to 'src/core/CL')

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 002a14400f..33b5ad0c6e 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -231,16 +231,12 @@ const std::map CLKernelLibrary::_kernel_program_map =
     { "dwc_MxN_native_quantized8_nhwc", "depthwise_convolution_quantized.cl" },
     { "dwc_3x3_native_quantized8_nchw", "depthwise_convolution_quantized.cl" },
     { "dwc_3x3_native_quantized8_dot8_nchw", "depthwise_convolution_quantized.cl" },
-    { "dwc_3x3_reshaped_quantized8_nhwc", "depthwise_convolution_quantized.cl" },
-    { "dwc_3x3_reshaped_quantized8_stride1_nhwc", "depthwise_convolution_quantized.cl" },
-    { "dwc_3x3_reshaped_quantized8_dot8_stride1_nhwc", "depthwise_convolution_quantized.cl" },
     { "depth_to_space_nchw", "depth_to_space.cl" },
     { "depth_to_space_nhwc", "depth_to_space.cl" },
-    { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16", "depthwise_convolution.cl" },
-    { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16", "depthwise_convolution.cl" },
-    { "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32", "depthwise_convolution.cl" },
-    { "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32", "depthwise_convolution.cl" },
-    { "depthwise_convolution_reshape_weights", "depthwise_convolution.cl" },
+    { "depthwise_convolution_3x3_stridex1_stridey1_f16", "depthwise_convolution.cl" },
+    { "depthwise_convolution_3x3_stridex2_stridey2_f16", "depthwise_convolution.cl" },
+    { "depthwise_convolution_3x3_stridex1_stridey1_f32", "depthwise_convolution.cl" },
+    { "depthwise_convolution_3x3_stridex2_stridey2_f32", "depthwise_convolution.cl" },
    {
"dequantization_layer", "dequantization_layer.cl" }, { "dequantization_layer_per_channel_nhwc", "dequantization_layer.cl" }, { "dequantization_layer_per_channel_nchw", "dequantization_layer.cl" }, diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h index f29c768fa3..63978cea3f 100644 --- a/src/core/CL/CLKernels.h +++ b/src/core/CL/CLKernels.h @@ -40,7 +40,6 @@ #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h" #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h" #include "src/core/CL/kernels/CLDepthwiseConvolutionLayerNativeKernel.h" -#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h" #include "src/core/CL/kernels/CLFFTDigitReverseKernel.h" #include "src/core/CL/kernels/CLFFTRadixStageKernel.h" #include "src/core/CL/kernels/CLFFTScaleKernel.h" @@ -91,6 +90,5 @@ #include "src/core/CL/kernels/CLWinogradFilterTransformKernel.h" #include "src/core/CL/kernels/CLWinogradInputTransformKernel.h" #include "src/core/CL/kernels/CLWinogradOutputTransformKernel.h" -#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h" #endif /* ARM_COMPUTE_CLKERNELS_H */ diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl index 8ce5617858..22a38e7094 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution.cl @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -42,110 +42,110 @@ inline __global uchar *ptr_offset(__global uchar *ptr, const int x, const int y, #if(DILATION_X == 1 && DILATION_Y == 1) -#define CONVOLUTION1x3_BIFROST2X1_STRIDE1(acc, src0, weights_row0) \ - ({ \ - acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \ +#define CONVOLUTION1x3_2X1_STRIDE1(acc, src0, weights_row0) \ + ({ \ + acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \ }) -#define CONVOLUTION1x3_BIFROST4X1_STRIDE1(acc, src0, weights_row0) \ - ({ \ - acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \ - acc.s2 = fma(src0.s2, weights_row0.s0, acc.s2); \ - acc.s2 = fma(src0.s3, weights_row0.s1, acc.s2); \ - acc.s2 = fma(src0.s4, weights_row0.s2, acc.s2); \ - acc.s3 = fma(src0.s3, weights_row0.s0, acc.s3); \ - acc.s3 = fma(src0.s4, weights_row0.s1, acc.s3); \ - acc.s3 = fma(src0.s5, weights_row0.s2, acc.s3); \ +#define CONVOLUTION1x3_4X1_STRIDE1(acc, src0, weights_row0) \ + ({ \ + acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0.s1, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0.s2, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0.s3, weights_row0.s2, acc.s1); \ + 
acc.s2 = fma(src0.s2, weights_row0.s0, acc.s2); \ + acc.s2 = fma(src0.s3, weights_row0.s1, acc.s2); \ + acc.s2 = fma(src0.s4, weights_row0.s2, acc.s2); \ + acc.s3 = fma(src0.s3, weights_row0.s0, acc.s3); \ + acc.s3 = fma(src0.s4, weights_row0.s1, acc.s3); \ + acc.s3 = fma(src0.s5, weights_row0.s2, acc.s3); \ }) -#define CONVOLUTION1x3_BIFROST2X1_STRIDE2(acc, src0, src1, weights_row0) \ - ({ \ - acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src1.s0, weights_row0.s2, acc.s1); \ +#define CONVOLUTION1x3_2X1_STRIDE2(acc, src0, src1, weights_row0) \ + ({ \ + acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src1.s0, weights_row0.s2, acc.s1); \ }) -#define CONVOLUTION1x3_BIFROST4X1_STRIDE2(acc, src0, src1, weights_row0) \ - ({ \ - acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0.s4, weights_row0.s2, acc.s1); \ - acc.s2 = fma(src0.s4, weights_row0.s0, acc.s2); \ - acc.s2 = fma(src0.s5, weights_row0.s1, acc.s2); \ - acc.s2 = fma(src0.s6, weights_row0.s2, acc.s2); \ - acc.s3 = fma(src0.s6, weights_row0.s0, acc.s3); \ - acc.s3 = fma(src0.s7, weights_row0.s1, acc.s3); \ - acc.s3 = fma(src1.s0, weights_row0.s2, acc.s3); \ +#define CONVOLUTION1x3_4X1_STRIDE2(acc, src0, src1, weights_row0) \ + ({ \ + acc.s0 = fma(src0.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0.s1, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0.s2, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0.s2, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0.s3, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0.s4, weights_row0.s2, acc.s1); \ + acc.s2 = fma(src0.s4, weights_row0.s0, acc.s2); \ + acc.s2 = fma(src0.s5, weights_row0.s1, acc.s2); \ + acc.s2 = fma(src0.s6, weights_row0.s2, acc.s2); \ + acc.s3 = fma(src0.s6, weights_row0.s0, acc.s3); \ + acc.s3 = fma(src0.s7, weights_row0.s1, acc.s3); \ + acc.s3 = fma(src1.s0, weights_row0.s2, acc.s3); \ }) #else /* DILATION_X==1 && DILATION_Y==1 */ -#define CONVOLUTION1x3_BIFROST2X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \ - ({ \ - acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \ +#define CONVOLUTION1x3_2X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \ }) -#define CONVOLUTION1x3_BIFROST2X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \ - ({ \ - acc.s0 = 
fma(src0_left.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \ +#define CONVOLUTION1x3_2X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \ }) -#define CONVOLUTION1x3_BIFROST4X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \ - ({ \ - acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \ - acc.s2 = fma(src0_left.s2, weights_row0.s0, acc.s2); \ - acc.s2 = fma(src0_mid.s2, weights_row0.s1, acc.s2); \ - acc.s2 = fma(src0_right.s2, weights_row0.s2, acc.s2); \ - acc.s3 = fma(src0_left.s3, weights_row0.s0, acc.s3); \ - acc.s3 = fma(src0_mid.s3, weights_row0.s1, acc.s3); \ - acc.s3 = fma(src0_right.s3, weights_row0.s2, acc.s3); \ +#define CONVOLUTION1x3_4X1_STRIDE1(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s1, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s1, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s1, weights_row0.s2, acc.s1); \ + acc.s2 = fma(src0_left.s2, weights_row0.s0, acc.s2); \ + acc.s2 = fma(src0_mid.s2, weights_row0.s1, acc.s2); \ + acc.s2 = fma(src0_right.s2, weights_row0.s2, acc.s2); \ + acc.s3 = fma(src0_left.s3, weights_row0.s0, acc.s3); \ + acc.s3 = fma(src0_mid.s3, weights_row0.s1, acc.s3); \ + acc.s3 = fma(src0_right.s3, weights_row0.s2, acc.s3); \ }) -#define CONVOLUTION1x3_BIFROST4X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \ - ({ \ - acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ - acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ - acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ - acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \ - acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \ - acc.s1 = fma(src0_right.s2, weights_row0.s2, acc.s1); \ - acc.s2 = fma(src0_left.s4, weights_row0.s0, acc.s2); \ - acc.s2 = fma(src0_mid.s4, weights_row0.s1, acc.s2); \ - acc.s2 = fma(src0_right.s4, weights_row0.s2, acc.s2); \ - acc.s3 = fma(src0_left.s6, weights_row0.s0, acc.s3); \ - acc.s3 = fma(src0_mid.s6, weights_row0.s1, acc.s3); \ - acc.s3 = fma(src0_right.s6, weights_row0.s2, acc.s3); \ +#define CONVOLUTION1x3_4X1_STRIDE2(acc, src0_left, src0_mid, src0_right, weights_row0) \ + ({ \ + acc.s0 = fma(src0_left.s0, weights_row0.s0, acc.s0); \ + acc.s0 = fma(src0_mid.s0, weights_row0.s1, acc.s0); \ + acc.s0 = fma(src0_right.s0, weights_row0.s2, acc.s0); \ + acc.s1 = fma(src0_left.s2, weights_row0.s0, acc.s1); \ + acc.s1 = fma(src0_mid.s2, weights_row0.s1, acc.s1); \ + acc.s1 = fma(src0_right.s2, 
weights_row0.s2, acc.s1); \ + acc.s2 = fma(src0_left.s4, weights_row0.s0, acc.s2); \ + acc.s2 = fma(src0_mid.s4, weights_row0.s1, acc.s2); \ + acc.s2 = fma(src0_right.s4, weights_row0.s2, acc.s2); \ + acc.s3 = fma(src0_left.s6, weights_row0.s0, acc.s3); \ + acc.s3 = fma(src0_mid.s6, weights_row0.s1, acc.s3); \ + acc.s3 = fma(src0_right.s6, weights_row0.s2, acc.s3); \ }) #endif /* DILATION_X==1 && DILATION_Y==1 */ @@ -385,8 +385,8 @@ __kernel void depthwise_convolution_3x3( * @param[in] weights_addr Pointer from where to get weights * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension */ -inline float2 convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, - const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +inline float2 convolution_3x3_dilation_stridex1_stridey1_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) { // Load the weights float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y)); @@ -407,9 +407,9 @@ inline float2 convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(__global uc float2 src20_mid = vload2(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); float2 src20_right = vload2(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2); + CONVOLUTION1x3_2X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_2X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_2X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2); return pixels0; } @@ -423,8 +423,8 @@ inline float2 convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(__global uc * @param[in] weights_addr Pointer from where to get weights * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension */ -inline float2 convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, - const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +inline float2 convolution_3x3_dilation_stridex2_stridey2_f32(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) { // Load the weights float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y)); @@ -445,9 +445,9 @@ inline float2 convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(__global uc float3 src20_mid = vload3(0, (__global float *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); float3 src20_right = vload3(0, (__global float *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, 
src20_left, src20_mid, src20_right, weights_row2); + CONVOLUTION1x3_2X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_2X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_2X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2); return pixels0; } @@ -491,7 +491,7 @@ inline float2 convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(__global uc * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector */ -__kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32( +__kernel void depthwise_convolution_3x3_stridex1_stridey1_f32( TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), TENSOR3D_DECLARATION(weights) @@ -531,29 +531,29 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32( float4 src40 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); // Row4 float4 src50 = vload4(0, (__global float *)(src_addr + 5 * src_stride_y)); // Row5 - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src00, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src10, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels0, src20, weights_row2); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels1, src10, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels1, src20, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels1, src30, weights_row2); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels2, src20, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels2, src30, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels2, src40, weights_row2); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src30, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src40, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE1(pixels3, src50, weights_row2); + CONVOLUTION1x3_2X1_STRIDE1(pixels0, src00, weights_row0); + CONVOLUTION1x3_2X1_STRIDE1(pixels0, src10, weights_row1); + CONVOLUTION1x3_2X1_STRIDE1(pixels0, src20, weights_row2); + CONVOLUTION1x3_2X1_STRIDE1(pixels1, src10, weights_row0); + CONVOLUTION1x3_2X1_STRIDE1(pixels1, src20, weights_row1); + CONVOLUTION1x3_2X1_STRIDE1(pixels1, src30, weights_row2); + CONVOLUTION1x3_2X1_STRIDE1(pixels2, src20, weights_row0); + CONVOLUTION1x3_2X1_STRIDE1(pixels2, src30, weights_row1); + CONVOLUTION1x3_2X1_STRIDE1(pixels2, src40, weights_row2); + CONVOLUTION1x3_2X1_STRIDE1(pixels3, src30, weights_row0); + CONVOLUTION1x3_2X1_STRIDE1(pixels3, src40, weights_row1); + CONVOLUTION1x3_2X1_STRIDE1(pixels3, src50, weights_row2); #else /* DILATION_X==1 && DILATION_Y==1 */ //3x3 Convolution of elements starting in 0th row - pixels0 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); + pixels0 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 1st row - pixels1 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y); + pixels1 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 2nd row - pixels2 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); 
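/*
 * For reference, the CONVOLUTION1x3_2X1_STRIDE1 macro renamed above computes
 * two horizontally adjacent outputs of a 1x3 row convolution with fused
 * multiply-adds. A minimal scalar C sketch of the same arithmetic, assuming
 * plain float arrays (the function and parameter names are illustrative only):
 */
#include <math.h>

/* Two stride-1 outputs of a 1x3 row convolution: acc[i] += sum_k src[i + k] * w[k]. */
static void conv1x3_2x1_stride1(const float src[4], const float w[3], float acc[2])
{
    acc[0] = fmaf(src[0], w[0], acc[0]);
    acc[0] = fmaf(src[1], w[1], acc[0]);
    acc[0] = fmaf(src[2], w[2], acc[0]);
    acc[1] = fmaf(src[1], w[0], acc[1]); /* second output starts one element to the right */
    acc[1] = fmaf(src[2], w[1], acc[1]);
    acc[1] = fmaf(src[3], w[2], acc[1]);
}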
+ pixels2 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 3rd row - pixels3 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f32(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y); + pixels3 = convolution_3x3_dilation_stridex1_stridey1_f32(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y); #endif /* DILATION_X==1 && DILATION_Y==1 */ @@ -611,7 +611,7 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32( * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector */ -__kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32( +__kernel void depthwise_convolution_3x3_stridex2_stridey2_f32( TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), TENSOR3D_DECLARATION(weights) @@ -654,19 +654,19 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32( float4 src40 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y)); // Row4 float2 src41 = vload2(2, (__global float *)(src_addr + 4 * src_stride_y)); // Row4 - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src00, src01, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src10, src11, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels0, src20, src21, weights_row2); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src20, src21, weights_row0); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src30, src31, weights_row1); - CONVOLUTION1x3_BIFROST2X1_STRIDE2(pixels1, src40, src41, weights_row2); + CONVOLUTION1x3_2X1_STRIDE2(pixels0, src00, src01, weights_row0); + CONVOLUTION1x3_2X1_STRIDE2(pixels0, src10, src11, weights_row1); + CONVOLUTION1x3_2X1_STRIDE2(pixels0, src20, src21, weights_row2); + CONVOLUTION1x3_2X1_STRIDE2(pixels1, src20, src21, weights_row0); + CONVOLUTION1x3_2X1_STRIDE2(pixels1, src30, src31, weights_row1); + CONVOLUTION1x3_2X1_STRIDE2(pixels1, src40, src41, weights_row2); #else /* DILATION_X==1 && DILATION_Y==1 */ //3x3 Convolution of elements starting in 0th row - pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); + pixels0 = convolution_3x3_dilation_stridex2_stridey2_f32(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 2nd row - pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); + pixels1 = convolution_3x3_dilation_stridex2_stridey2_f32(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); #endif /* DILATION_X==1 && DILATION_Y==1 */ #ifdef HAS_BIAS @@ -684,104 +684,6 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32( #endif // defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F32) -#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH) -/** Reshape the weights for quantized depthwise convolution - * - * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type, e.g. -DDATA_TYPE=uint8 - * @note Output width should be given as a preprocessor argument using -DDST_WIDTH=width, e.g. 
-DDST_WIDTH=128 - * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=vec_size, e.g., -DVEC_SIZE=4 - * @attention Input's height and width should be 3 - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: All - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - */ -__kernel void depthwise_convolution_reshape_weights( - TENSOR3D_DECLARATION(src), - IMAGE_DECLARATION(dst)) -{ - Vector src = CONVERT_TO_VECTOR_STRUCT(src); - const int x = get_global_id(0); - - // Load 3x3xVEC_SIZE weights - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w0 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 0 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w1 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 0 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w2 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 0 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w3 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 1 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w4 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 1 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w5 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 1 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w6 = VLOAD(VEC_SIZE)(0, src.ptr + 0 * src_stride_y + 2 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w7 = VLOAD(VEC_SIZE)(0, src.ptr + 1 * src_stride_y + 2 * src_stride_z); - VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) - w8 = VLOAD(VEC_SIZE)(0, src.ptr + 2 * src_stride_y + 2 * src_stride_z); - - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * DST_WIDTH * sizeof(DATA_TYPE); - -#if defined(TRANSPOSE) -#if VEC_SIZE != 4 -#error "VEC_SIZE not supported" -#else // VEC_SIZE != 4 - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w0.s0, w1.s0, w2.s0, w3.s0), 0, dst_addr + 0); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w4.s0, w5.s0, w6.s0, w7.s0), 0, dst_addr + 1 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w8.s0, w0.s1, w1.s1, w2.s1), 0, dst_addr + 2 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w3.s1, w4.s1, w5.s1, w6.s1), 0, dst_addr + 3 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w7.s1, w8.s1, w0.s2, w1.s2), 0, 
dst_addr + 4 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w2.s2, w3.s2, w4.s2, w5.s2), 0, dst_addr + 5 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w6.s2, w7.s2, w8.s2, w0.s3), 0, dst_addr + 6 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w1.s3, w2.s3, w3.s3, w4.s3), 0, dst_addr + 7 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))(w5.s3, w6.s3, w7.s3, w8.s3), 0, dst_addr + 8 * sizeof(DATA_TYPE) * VEC_SIZE); -#endif // VEC_SIZE != 4 -#else // !defined(TRANSPOSE) - VSTORE(VEC_SIZE) - (w0, 0, dst_addr + 0); - VSTORE(VEC_SIZE) - (w1, 0, dst_addr + 1 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w2, 0, dst_addr + 2 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w3, 0, dst_addr + 3 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w4, 0, dst_addr + 4 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w5, 0, dst_addr + 5 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w6, 0, dst_addr + 6 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w7, 0, dst_addr + 7 * sizeof(DATA_TYPE) * VEC_SIZE); - VSTORE(VEC_SIZE) - (w8, 0, dst_addr + 8 * sizeof(DATA_TYPE) * VEC_SIZE); -#endif // defined(TRANSPOSE) -} -#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH) - #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16) #if defined(CONV_STRIDE_X) #if CONV_STRIDE_X == 1 @@ -805,8 +707,8 @@ __kernel void depthwise_convolution_reshape_weights( * @param[in] weights_addr Pointer from where to get weights * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension */ -inline half4 convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, - const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +inline half4 convolution_3x3_dilation_stridex1_stridey1_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) { // Load the weights half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y)); @@ -827,9 +729,9 @@ inline half4 convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(__global uch half4 src20_mid = vload4(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); half4 src20_right = vload4(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2); + CONVOLUTION1x3_4X1_STRIDE1(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_4X1_STRIDE1(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_4X1_STRIDE1(pixels0, src20_left, src20_mid, src20_right, weights_row2); return pixels0; } @@ -843,8 +745,8 @@ inline half4 convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(__global uch * @param[in] weights_addr Pointer from where to get weights * @param[in] weights_stride_y Stride of weights tesnsor in Y dimension */ -inline half4 
convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, - const int y_offset, __global uchar *weights_addr, const int weights_stride_y) +inline half4 convolution_3x3_dilation_stridex2_stridey2_f16(__global uchar *src_addr, const int stride_x_bytes, const int stride_y_bytes, + const int y_offset, __global uchar *weights_addr, const int weights_stride_y) { // Load the weights half3 weights_row0 = vload3(0, (__global half *)(weights_addr + 0 * weights_stride_y)); @@ -865,9 +767,9 @@ inline half4 convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(__global uch half8 src20_mid = vload8(0, (__global half *)ptr_offset(src_addr, DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); half8 src20_right = vload8(0, (__global half *)ptr_offset(src_addr, 2 * DILATION_X, y_offset + DILATION_Y * 2, stride_x_bytes, stride_y_bytes)); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2); + CONVOLUTION1x3_4X1_STRIDE2(pixels0, src00_left, src00_mid, src00_right, weights_row0); + CONVOLUTION1x3_4X1_STRIDE2(pixels0, src10_left, src10_mid, src10_right, weights_row1); + CONVOLUTION1x3_4X1_STRIDE2(pixels0, src20_left, src20_mid, src20_right, weights_row2); return pixels0; } @@ -1127,7 +1029,7 @@ __kernel void depthwise_convolution_3x3_f16( * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector */ -__kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16( +__kernel void depthwise_convolution_3x3_stridex1_stridey1_f16( TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), TENSOR3D_DECLARATION(weights) @@ -1174,29 +1076,29 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16( half8 src40 = vload8(0, (__global half *)(src_addr + 4 * src_stride_y)); // Row4 half8 src50 = vload8(0, (__global half *)(src_addr + 5 * src_stride_y)); // Row5 - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src00, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src10, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels0, src20, weights_row2); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels1, src10, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels1, src20, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels1, src30, weights_row2); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels2, src20, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels2, src30, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels2, src40, weights_row2); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src30, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src40, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE1(pixels3, src50, weights_row2); + CONVOLUTION1x3_4X1_STRIDE1(pixels0, src00, weights_row0); + CONVOLUTION1x3_4X1_STRIDE1(pixels0, src10, weights_row1); + CONVOLUTION1x3_4X1_STRIDE1(pixels0, src20, weights_row2); + CONVOLUTION1x3_4X1_STRIDE1(pixels1, src10, weights_row0); + CONVOLUTION1x3_4X1_STRIDE1(pixels1, src20, weights_row1); + CONVOLUTION1x3_4X1_STRIDE1(pixels1, src30, weights_row2); + CONVOLUTION1x3_4X1_STRIDE1(pixels2, src20, weights_row0); + CONVOLUTION1x3_4X1_STRIDE1(pixels2, src30, 
weights_row1); + CONVOLUTION1x3_4X1_STRIDE1(pixels2, src40, weights_row2); + CONVOLUTION1x3_4X1_STRIDE1(pixels3, src30, weights_row0); + CONVOLUTION1x3_4X1_STRIDE1(pixels3, src40, weights_row1); + CONVOLUTION1x3_4X1_STRIDE1(pixels3, src50, weights_row2); #else /* DILATION_X==1 && DILATION_Y==1 */ //3x3 Convolution of elements starting in 0th row - pixels0 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); + pixels0 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 1st row - pixels1 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y); + pixels1 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 1, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 2nd row - pixels2 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); + pixels2 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 3rd row - pixels3 = convolution_3x3_dilation_stridex1_stridey1_bifrost_f16(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y); + pixels3 = convolution_3x3_dilation_stridex1_stridey1_f16(src_addr, src_stride_x, src_stride_y, 3, weights_addr, weights_stride_y); #endif /* DILATION_X==1 && DILATION_Y==1 */ @@ -1250,7 +1152,7 @@ __kernel void depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16( * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector */ -__kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16( +__kernel void depthwise_convolution_3x3_stridex2_stridey2_f16( TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst), TENSOR3D_DECLARATION(weights) @@ -1300,18 +1202,18 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16( half8 src40 = vload8(0, (__global half *)(src_addr + 4 * src_stride_y)); // Row4 half2 src41 = vload2(4, (__global half *)(src_addr + 4 * src_stride_y)); // Row4 - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src00, src01, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src10, src11, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels0, src20, src21, weights_row2); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src20, src21, weights_row0); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src30, src31, weights_row1); - CONVOLUTION1x3_BIFROST4X1_STRIDE2(pixels1, src40, src41, weights_row2); + CONVOLUTION1x3_4X1_STRIDE2(pixels0, src00, src01, weights_row0); + CONVOLUTION1x3_4X1_STRIDE2(pixels0, src10, src11, weights_row1); + CONVOLUTION1x3_4X1_STRIDE2(pixels0, src20, src21, weights_row2); + CONVOLUTION1x3_4X1_STRIDE2(pixels1, src20, src21, weights_row0); + CONVOLUTION1x3_4X1_STRIDE2(pixels1, src30, src31, weights_row1); + CONVOLUTION1x3_4X1_STRIDE2(pixels1, src40, src41, weights_row2); #else /* DILATION_X==1 && DILATION_Y==1 */ //3x3 Convolution of elements starting in 0th row - pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); + pixels0 = 
convolution_3x3_dilation_stridex2_stridey2_f16(src_addr, src_stride_x, src_stride_y, 0, weights_addr, weights_stride_y); //3x3 Convolution of elements starting in 2nd row - pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); + pixels1 = convolution_3x3_dilation_stridex2_stridey2_f16(src_addr, src_stride_x, src_stride_y, 2, weights_addr, weights_stride_y); #endif /* DILATION_X==1 && DILATION_Y==1 */ #ifdef HAS_BIAS diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl index c7fe401f80..000dce1590 100644 --- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl +++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl @@ -334,9 +334,9 @@ __kernel void dwc_3x3_native_quantized8_nchw( #else // defined(REAL_MULTIPLIER) #if defined(PER_CHANNEL_QUANTIZATION) - int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8); - int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8); - values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0); + int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8); + int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8); + values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0); #else // defined(PER_CHANNEL_QUANTIZATION) #if OUTPUT_SHIFT < 0 values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); @@ -360,9 +360,9 @@ __kernel void dwc_3x3_native_quantized8_nchw( #else // defined(REAL_MULTIPLIER) #if defined(PER_CHANNEL_QUANTIZATION) - int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8); - int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8); - values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0); + int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8); + int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8); + values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0); #else // defined(PER_CHANNEL_QUANTIZATION) #if OUTPUT_SHIFT < 0 values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); @@ -446,8 +446,8 @@ __kernel void dwc_3x3_native_quantized8_nchw( VEC_TYPE(16) \ temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \ VEC_TYPE(8) \ - temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE)))); \ - left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \ + temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \ + left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \ \ temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \ temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))); \ @@ -776,835 +776,6 @@ __kernel void dwc_3x3_native_quantized8_dot8_nchw( #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) -#if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && VEC_SIZE == 4 -/** This 
function computes the depthwise convolution quantized for NHWC data layout when the stride along the width or height is not 1. - * - * @note This kernel assumes VEC_SIZE is 4. - * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. - * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2) - * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM_2=112) - * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) - * @note The convolution pad top must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1) - * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_Y=X) - * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1) - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_y * number of elements along Z processed per workitem(in bytes) - * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) - * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) - * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - * @param[in] weights_ptr Pointer to the weights tensor reshaped. 
Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL - * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) - * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) - * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor - * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 - * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) - * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector - * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 - * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) - * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector - * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 - * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) - * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector - * @param[in] max_offset Max offset for the input tensor - */ -__kernel void dwc_3x3_reshaped_quantized8_nhwc( - TENSOR4D_DECLARATION(src), - TENSOR4D_DECLARATION(dst), - IMAGE_DECLARATION(weights), - VECTOR_DECLARATION(output_multipliers), - VECTOR_DECLARATION(output_shifts), -#if defined(HAS_BIAS) - VECTOR_DECLARATION(biases), -#endif /* defined(HAS_BIAS) */ - int max_offset) -{ - const int x = get_global_id(0); // channels - const int y = get_global_id(1); // spatial coordinate x -#if defined(DST_DEPTH) - int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y - int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else // defined(DST_DEPTH) - int z = get_global_id(2); // spatial coordinate y -#endif // defined(DST_DEPTH) - - __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; - -#if defined(DST_DEPTH) - __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; -#else /* defined(DST_DEPTH) */ - __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE; -#endif /* defined(DST_DEPTH) */ - - int z_coord = 0; - int4 offset = 0; - int4 y_coord = ((int4)(y * CONV_STRIDE_X) + (int4)(0, DILATION_X * 1, DILATION_X * 2, DILATION_X * 3)) - (int)CONV_PAD_LEFT; - - // Only for y = 0 we can have a negative coordinate. 
If so, we convert it to SRC_DIM_1 - y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); - y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1); - y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1); - y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1); - - int4 y_offset = convert_int4(y_coord * (int)src_stride_y); - - // We compute VEC_SIZEx1x1 [C,W,H] elements - VEC_INT acc = 0, sum = 0; - - // Load weights - VEC_DATA_TYPE(WEIGHTS_TYPE, 16) - w0_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr)); - VEC_DATA_TYPE(WEIGHTS_TYPE, 16) - w1_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16)); - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w8 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * 16)); - - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w0 = w0_tmp.s0123; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w1 = w0_tmp.s4567; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w2 = w0_tmp.s89AB; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w3 = w0_tmp.sCDEF; - - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w4 = w1_tmp.s0123; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w5 = w1_tmp.s4567; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w6 = w1_tmp.s89AB; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w7 = w1_tmp.sCDEF; - -#if INPUT_OFFSET != 0 - VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) - + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT) + CONVERT(w5, VEC_INT) - + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT); -#endif /* INPUT_OFFSET != 0 */ - - // Load input values - // z == 0 - // Clamp z_coord as for z = 0, it can be negative - // z_coord is casted to unsigned int in order to use just a min() operation - // A "-1" 32 bit signed variable converted to unsigned gives 4294967295 - z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - - // z == 1 - // z_coord can be only negative for z = 0 so we do not need to clamp it - // Moreover z_coord cannot be out-of-bound for z = 1 so we do not need to clamp the offset - z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - - // z == 2 - // Offset can be out-of-bound so we need to check if it is greater than max_offset - z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y * 2; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values8 = VLOAD(VEC_SIZE)(0, (__global 
DATA_TYPE *)(src_addr + offset.s2)); - - MULTIPLY_ADD_ACCUMULATE(values0, w0, acc, sum); - MULTIPLY_ADD_ACCUMULATE(values1, w1, acc, sum); - MULTIPLY_ADD_ACCUMULATE(values2, w2, acc, sum); - - MULTIPLY_ADD_ACCUMULATE(values3, w3, acc, sum); - MULTIPLY_ADD_ACCUMULATE(values4, w4, acc, sum); - MULTIPLY_ADD_ACCUMULATE(values5, w5, acc, sum); - - MULTIPLY_ADD_ACCUMULATE(values6, w6, acc, sum); - MULTIPLY_ADD_ACCUMULATE(values7, w7, acc, sum); - MULTIPLY_ADD_ACCUMULATE(values8, w8, acc, sum); - -#if defined(HAS_BIAS) - Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); - VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr); - acc += bias_values; -#endif // defined(HAS_BIAS) - -#if WEIGHTS_OFFSET != 0 - acc += WEIGHTS_OFFSET * sum; -#endif /* WEIGHTS_OFFSET != 0 */ - -#if INPUT_OFFSET != 0 - acc += INPUT_OFFSET * sum_we; -#endif /* INPUT_OFFSET != 0 */ - -#if K_OFFSET != 0 - acc += (VEC_INT)K_OFFSET; -#endif /* K_OFFSET != 0 */ - -#if defined(REAL_MULTIPLIER) - - acc = CONVERT(round(CONVERT(acc, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - -#else // defined(REAL_MULTIPLIER) - -#if defined(PER_CHANNEL_QUANTIZATION) - Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT(output_multipliers); - Vector output_shifts = CONVERT_TO_VECTOR_STRUCT(output_shifts); - VEC_INT output_multiplier = VLOAD(VEC_SIZE)(0, (__global int *)output_multipliers.ptr); - VEC_INT output_shift = VLOAD(VEC_SIZE)(0, (__global int *)output_shifts.ptr); - - VEC_INT res_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, output_multiplier, output_shift, VEC_SIZE); - VEC_INT res_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc, output_multiplier, output_shift); - acc = select(res_shift_lt0, res_shift_gt0, output_shift >= 0); -#else // defined(PER_CHANNEL_QUANTIZATION) -#if OUTPUT_SHIFT < 0 - acc = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); -#else // OUTPUT_SHIFT < 0 - acc = asymm_mult_by_quant_multiplier_less_than_one(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); -#endif // OUTPUT_SHIFT < 0 -#endif // defined(PER_CHANNEL_QUANTIZATION) - -#endif // defined(REAL_MULTIPLIER) - - acc += (VEC_INT)OUTPUT_OFFSET; - - VEC_TYPE(VEC_SIZE) - res = CONVERT_SAT(acc, VEC_TYPE(VEC_SIZE)); - -#if defined(DST_DEPTH) - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z + b * dst_stride_w; -#else /* defined(DST_DEPTH) */ - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z; -#endif /* defined(DST_DEPTH) */ - - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res), 0, (__global DATA_TYPE *)(dst_addr)); -} -#endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) - -#if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) && VEC_SIZE == 4 -/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1. - * - * @note This kernel assumes VEC_SIZE is 4. - * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. - * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2) - * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM_2=112) - * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (i.e. 
-DNUM_ROWS_PROCESSED=2) - * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (i.e. -DNUM_PLANES_PROCESSED=2) - * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) - * @note The convolution pad top must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1). - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_y * number of elements along Z processed per workitem(in bytes) - * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) - * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) - * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL - * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) - * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) - * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor - * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 - * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) - * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector - * @param[in] output_shifts_ptr Pointer to the output shifts vector. 
Supported data types: S32 - * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) - * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector - * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 - * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) - * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector - * @param[in] max_offset Max offset for the input tensor - */ - -__kernel void dwc_3x3_reshaped_quantized8_stride1_nhwc( - TENSOR4D_DECLARATION(src), - TENSOR4D_DECLARATION(dst), - IMAGE_DECLARATION(weights), - VECTOR_DECLARATION(output_multipliers), - VECTOR_DECLARATION(output_shifts), -#if defined(HAS_BIAS) - VECTOR_DECLARATION(biases), -#endif /* defined(HAS_BIAS) */ - int max_offset) -{ - int x = get_global_id(0); - int y = get_global_id(1); -#if defined(DST_DEPTH) - int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y - int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else // defined(DST_DEPTH) - int z = get_global_id(2); // spatial coordinate y -#endif // defined(DST_DEPTH) - - __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; - -#if defined(DST_DEPTH) - __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; -#else /* defined(DST_DEPTH) */ - __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE; -#endif /* defined(DST_DEPTH) */ - - int z_coord = 0; - int4 offset = 0; - int4 y_coord = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT; - - // Only for y = 0 can we have a negative coordinate.
If so, we convert it to SRC_DIM_1 - y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); - y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1); - y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1); - y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1); - - int4 y_offset = convert_int4(y_coord * (int)src_stride_y); - - // We compute 4x2x2 [C,W,H] elements - VEC_INT acc0 = 0, sum0 = 0; - VEC_INT acc1 = 0, sum1 = 0; - VEC_INT acc2 = 0, sum2 = 0; - VEC_INT acc3 = 0, sum3 = 0; - - // Load weights - VEC_DATA_TYPE(WEIGHTS_TYPE, 16) - w0_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr)); - VEC_DATA_TYPE(WEIGHTS_TYPE, 16) - w1_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16)); - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w8 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * 16)); - - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w0 = w0_tmp.s0123; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w1 = w0_tmp.s4567; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w2 = w0_tmp.s89AB; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w3 = w0_tmp.sCDEF; - - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w4 = w1_tmp.s0123; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w5 = w1_tmp.s4567; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w6 = w1_tmp.s89AB; - VEC_DATA_TYPE(WEIGHTS_TYPE, 4) - w7 = w1_tmp.sCDEF; - -#if INPUT_OFFSET != 0 - VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) - + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT) + CONVERT(w5, VEC_INT) - + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT); -#endif /* INPUT_OFFSET != 0 */ - - // Load input values - // z == 0 - // Clamp z_coord as for z = 0, it can be negative - // z_coord is cast to unsigned int in order to use just a min() operation - // A "-1" 32-bit signed variable converted to unsigned gives 4294967295 - z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - // z == 1 - z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 1; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - // z == 2 - z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 2; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values10 =
VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - // z == 3 - z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 3; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = select(y_offset + (int4)(z_coord * src_stride_z), (int4)max_offset, (int4)z_coord < 0 || (int4)z_coord >= SRC_DIM_2); - VEC_TYPE(VEC_SIZE) - values12 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values13 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values14 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values15 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - MULTIPLY_ADD_ACCUMULATE(values0, w0, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values1, w1, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values2, w2, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values1, w0, acc1, sum1); - MULTIPLY_ADD_ACCUMULATE(values2, w1, acc1, sum1); - MULTIPLY_ADD_ACCUMULATE(values3, w2, acc1, sum1); - - MULTIPLY_ADD_ACCUMULATE(values4, w3, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values5, w4, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values6, w5, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values5, w3, acc1, sum1); - MULTIPLY_ADD_ACCUMULATE(values6, w4, acc1, sum1); - MULTIPLY_ADD_ACCUMULATE(values7, w5, acc1, sum1); - - MULTIPLY_ADD_ACCUMULATE(values8, w6, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values9, w7, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values10, w8, acc0, sum0); - MULTIPLY_ADD_ACCUMULATE(values9, w6, acc1, sum1); - MULTIPLY_ADD_ACCUMULATE(values10, w7, acc1, sum1); - MULTIPLY_ADD_ACCUMULATE(values11, w8, acc1, sum1); - - MULTIPLY_ADD_ACCUMULATE(values4, w0, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values5, w1, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values6, w2, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values5, w0, acc3, sum3); - MULTIPLY_ADD_ACCUMULATE(values6, w1, acc3, sum3); - MULTIPLY_ADD_ACCUMULATE(values7, w2, acc3, sum3); - - MULTIPLY_ADD_ACCUMULATE(values8, w3, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values9, w4, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values10, w5, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values9, w3, acc3, sum3); - MULTIPLY_ADD_ACCUMULATE(values10, w4, acc3, sum3); - MULTIPLY_ADD_ACCUMULATE(values11, w5, acc3, sum3); - - MULTIPLY_ADD_ACCUMULATE(values12, w6, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values13, w7, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values14, w8, acc2, sum2); - MULTIPLY_ADD_ACCUMULATE(values13, w6, acc3, sum3); - MULTIPLY_ADD_ACCUMULATE(values14, w7, acc3, sum3); - MULTIPLY_ADD_ACCUMULATE(values15, w8, acc3, sum3); - -#if defined(HAS_BIAS) - Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); - - VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr); - - acc0 += bias_values; - acc1 += bias_values; - acc2 += bias_values; - acc3 += bias_values; -#endif /* defined(HAS_BIAS) */ - -#if WEIGHTS_OFFSET != 0 - acc0 += WEIGHTS_OFFSET * sum0; - acc1 += WEIGHTS_OFFSET * sum1; - acc2 += WEIGHTS_OFFSET * sum2; - acc3 += WEIGHTS_OFFSET * sum3; -#endif /* WEIGHTS_OFFSET != 0 */ - -#if INPUT_OFFSET != 0 - VEC_INT offs = INPUT_OFFSET * sum_we; - - acc0 += offs; - acc1 += offs; - acc2 += offs; - acc3 += offs; -#endif /* INPUT_OFFSET != 0 */ - -#if K_OFFSET != 0 - acc0 += (VEC_INT)K_OFFSET; - acc1 += (VEC_INT)K_OFFSET; - acc2 += (VEC_INT)K_OFFSET; - acc3 += (VEC_INT)K_OFFSET; -#endif /* K_OFFSET != 0 */ - -#if defined(REAL_MULTIPLIER) - - 
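/* The REAL_MULTIPLIER branch opened above requantizes the four int32 accumulators
 * with one floating-point multiply per lane instead of the fixed-point
 * multiplier/shift pair used in the #else branch. A minimal scalar sketch of that
 * step in plain C, assuming per-tensor quantization; requantize and its parameter
 * names are illustrative, not part of the library: */

#include <math.h>
#include <stdint.h>

/* real_multiplier = input_scale * weights_scale / output_scale */
static inline uint8_t requantize(int32_t acc, float real_multiplier, int32_t output_offset)
{
    int32_t v = (int32_t)roundf((float)acc * real_multiplier) + output_offset;
    if(v < 0)   v = 0;   /* CONVERT_SAT lower bound for QASYMM8 */
    if(v > 255) v = 255; /* CONVERT_SAT upper bound for QASYMM8 */
    return (uint8_t)v;
}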
acc0 = CONVERT(round(CONVERT(acc0, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - acc1 = CONVERT(round(CONVERT(acc1, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - acc2 = CONVERT(round(CONVERT(acc2, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - acc3 = CONVERT(round(CONVERT(acc3, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - -#else // defined(REAL_MULTIPLIER) - -#if defined(PER_CHANNEL_QUANTIZATION) - Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT(output_multipliers); - Vector output_shifts = CONVERT_TO_VECTOR_STRUCT(output_shifts); - VEC_INT output_multiplier = VLOAD(VEC_SIZE)(0, (__global int *)output_multipliers.ptr); - VEC_INT output_shift = VLOAD(VEC_SIZE)(0, (__global int *)output_shifts.ptr); - - VEC_INT res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, output_multiplier, output_shift, VEC_SIZE); - VEC_INT res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, output_multiplier, output_shift, VEC_SIZE); - VEC_INT res2_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc2, output_multiplier, output_shift, VEC_SIZE); - VEC_INT res3_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc3, output_multiplier, output_shift, VEC_SIZE); - VEC_INT res0_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, output_multiplier, output_shift); - VEC_INT res1_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc1, output_multiplier, output_shift); - VEC_INT res2_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc2, output_multiplier, output_shift); - VEC_INT res3_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc3, output_multiplier, output_shift); - acc0 = select(res0_shift_lt0, res0_shift_gt0, output_shift >= 0); - acc1 = select(res1_shift_lt0, res1_shift_gt0, output_shift >= 0); - acc2 = select(res2_shift_lt0, res2_shift_gt0, output_shift >= 0); - acc3 = select(res3_shift_lt0, res3_shift_gt0, output_shift >= 0); -#else // defined(PER_CHANNEL_QUANTIZATION) -#if OUTPUT_SHIFT < 0 - acc0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); - acc1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); - acc2 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); - acc3 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); -#else // OUTPUT_SHIFT < 0 - acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); - acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); - acc2 = asymm_mult_by_quant_multiplier_less_than_one(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); - acc3 = asymm_mult_by_quant_multiplier_less_than_one(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); -#endif // OUTPUT_SHIFT < 0 -#endif // defined(PER_CHANNEL_QUANTIZATION) - -#endif // defined(REAL_MULTIPLIER) - - acc0 += (VEC_INT)OUTPUT_OFFSET; - acc1 += (VEC_INT)OUTPUT_OFFSET; - acc2 += (VEC_INT)OUTPUT_OFFSET; - acc3 += (VEC_INT)OUTPUT_OFFSET; - - VEC_TYPE(VEC_SIZE) - res0 = CONVERT_SAT(acc0, VEC_TYPE(VEC_SIZE)); - VEC_TYPE(VEC_SIZE) - res1 = CONVERT_SAT(acc1, VEC_TYPE(VEC_SIZE)); - VEC_TYPE(VEC_SIZE) - res2 = CONVERT_SAT(acc2, VEC_TYPE(VEC_SIZE)); - VEC_TYPE(VEC_SIZE) - res3 = CONVERT_SAT(acc3, VEC_TYPE(VEC_SIZE)); - -#if defined(DST_DEPTH) - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * 
dst_step_z + b * dst_stride_w; -#else /* defined(DST_DEPTH) */ - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z; -#endif /* defined(DST_DEPTH) */ - - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res0), 0, dst_addr + 0 * dst_stride_y); - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res1), 0, dst_addr + 1 * dst_stride_y); - -#if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0) - if((z * NUM_PLANES_PROCESSED + 1) < DST_DIM_2) -#endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0) - { - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res2), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y + 1 * dst_stride_z)); - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res3), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y + 1 * dst_stride_z)); - } -} - -#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE == 4 -/** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1 using dot product. - * - * @note Per-channel quantization is not supported by this kernel. - * @note This kernel assumes VEC_SIZE is 4. - * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. - * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=2) - * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112) - * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2) - * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2) - * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) - * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1). - * @note If REAL_MULTIPLIER is passed at compile time (e.g. -DREAL_MULTIPLIER=1.355f), the final quantization is performed using a floating point multiplication. - * If not, the quantization will be performed using a fixed point multiplication - * - * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED - * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) - * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) - * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) - * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) - * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) - * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor - * @param[in] dst_ptr Pointer to the destination tensor.
Supported data types: same as @p src_ptr - * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) - * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) - * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) - * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) - * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor - * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr - * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) - * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) - * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor - * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 - * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) - * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector - * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 - * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) - * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector - * @param[in] biases_ptr (Optional) Pointer to the biases vector.
Supported data types: S32 - * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) - * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector - * @param[in] max_offset The maximum allowed offset for the input tensor - */ -__kernel void dwc_3x3_reshaped_quantized8_dot8_stride1_nhwc( - TENSOR4D_DECLARATION(src), - TENSOR4D_DECLARATION(dst), - IMAGE_DECLARATION(weights), - VECTOR_DECLARATION(output_multipliers), - VECTOR_DECLARATION(output_shifts), -#if defined(HAS_BIAS) - VECTOR_DECLARATION(biases), -#endif // defined(HAS_BIAS) - int max_offset) -{ - int x = get_global_id(0); - int y = get_global_id(1); -#if defined(DST_DEPTH) - int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y - int b = get_global_id(2) / (int)DST_DEPTH; // batch -#else // defined(DST_DEPTH) - int z = get_global_id(2); // spatial coordinate y -#endif // defined(DST_DEPTH) - - __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; - -#if defined(DST_DEPTH) - __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; -#else /* defined(DST_DEPTH) */ - __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE; -#endif /* defined(DST_DEPTH) */ - - int z_coord = 0; - int4 offset = 0; - int4 y_coord = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT; - - // Only for y = 0 can we have a negative coordinate. If so, we convert it to SRC_DIM_1 - y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); - y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1); - y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1); - y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1); - - int4 y_offset = convert_int4(y_coord * (int)src_stride_y); - - // We compute 4x2x1 [C,W,H] elements - VEC_INT acc0 = 0; - VEC_INT acc1 = 0; - VEC_INT sum0 = 0; - VEC_INT sum1 = 0; - - // Load weights - VEC_TYPE(16) - w0 = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr)); - VEC_TYPE(16) - w1 = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16)); - VEC_TYPE(4) - w2 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 32)); - -#if INPUT_OFFSET != 0 - // Initialize the final result with the weights reduction multiplied by INPUT_OFFSET - DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s0, w0.s01234567, w0.s8); - DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); - DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s2, w1.s23456789, w1.sA); - DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); - - // Multiply the weights reduction by INPUT_OFFSET - acc0 = INPUT_OFFSET * acc0; - - acc1 = acc0; -#endif // INPUT_OFFSET != 0 - - // Load input values - // z == 0 - // Clamp z_coord as for z = 0, it can be negative - // z_coord is cast to unsigned int in order to use just a min() operation - // A "-1" 32-bit signed variable converted to unsigned gives 4294967295 - z_coord = z - (int)CONV_PAD_TOP; - z_coord = min((uint)z_coord, (uint)SRC_DIM_2); - offset = y_offset + (int4)(z_coord * src_stride_z); - offset = min(offset, (int4)max_offset); - - VEC_TYPE(VEC_SIZE) - values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); -
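/* The clamp above exploits unsigned wrap-around: a negative int32 coordinate cast
 * to uint becomes a huge value, so a single min() rejects underflow and overflow
 * at once, and the out-of-range result indexes the padded sentinel row. A
 * self-contained C sketch of the same trick; clamp_coord and dim are illustrative
 * names (dim plays the role of SRC_DIM_2): */

#include <stdint.h>

static inline uint32_t clamp_coord(int32_t coord, uint32_t dim)
{
    const uint32_t u = (uint32_t)coord; /* -1 wraps to 4294967295 */
    return (u < dim) ? u : dim;         /* min(u, dim) handles both bounds */
}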
VEC_TYPE(VEC_SIZE) - values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - // z == 1 - // z_coord can only be negative for z = 0 so we do not need to clamp it - // Moreover z_coord cannot be out of bounds for z = 1 so we do not need to clamp the offset - z_coord = z - (int)CONV_PAD_TOP + 1; - offset = y_offset + (int4)(z_coord * src_stride_z); - VEC_TYPE(VEC_SIZE) - values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - // z == 2 - // After z = 1 we can simply add src_stride_z to offset without updating z_coord - // However offset can be out of bounds so we need to check if it is greater than max_offset - offset += (int4)src_stride_z; - offset = min(offset, (int4)max_offset); - VEC_TYPE(VEC_SIZE) - values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); - VEC_TYPE(VEC_SIZE) - values9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); - VEC_TYPE(VEC_SIZE) - values10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); - VEC_TYPE(VEC_SIZE) - values11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); - - DOT_PRODUCT_REDUCTION(sum0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0); - DOT_PRODUCT_REDUCTION(sum1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0); - DOT_PRODUCT(acc0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0, w0.s01234567, w0.s8); - DOT_PRODUCT(acc1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0, w0.s01234567, w0.s8); - - DOT_PRODUCT_REDUCTION(sum0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1); - DOT_PRODUCT_REDUCTION(sum1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1); - DOT_PRODUCT(acc0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); - DOT_PRODUCT(acc1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); - - DOT_PRODUCT_REDUCTION(sum0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2); - DOT_PRODUCT_REDUCTION(sum1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2); - DOT_PRODUCT(acc0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2, w1.s23456789, w1.sA); - DOT_PRODUCT(acc1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2, w1.s23456789, w1.sA); - - DOT_PRODUCT_REDUCTION(sum0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3); -
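/* Each DOT_PRODUCT above accumulates one output lane's 3x3 window (nine inputs
 * against nine weights), while DOT_PRODUCT_REDUCTION sums the same nine inputs
 * into sum0/sum1 for the later WEIGHTS_OFFSET correction; the real kernel maps
 * both onto the cl_arm_integer_dot_product_int8 extension. A scalar C model
 * under those assumptions; dot9_accumulate is an illustrative name: */

#include <stdint.h>

static inline void dot9_accumulate(const uint8_t values[9], const uint8_t weights[9],
                                   int32_t *acc, int32_t *sum)
{
    for(int i = 0; i < 9; ++i)
    {
        *acc += (int32_t)values[i] * (int32_t)weights[i]; /* DOT_PRODUCT           */
        *sum += (int32_t)values[i];                       /* DOT_PRODUCT_REDUCTION */
    }
}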
DOT_PRODUCT_REDUCTION(sum1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3); - DOT_PRODUCT(acc0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); - DOT_PRODUCT(acc1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); - -#if defined(HAS_BIAS) - Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); - - VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr); - - acc0 += bias_values; - acc1 += bias_values; - -#endif // defined(HAS_BIAS) - -#if WEIGHTS_OFFSET != 0 - acc0 += WEIGHTS_OFFSET * sum0; - acc1 += WEIGHTS_OFFSET * sum1; -#endif // WEIGHTS_OFFSET != 0 - -#if K_OFFSET != 0 - acc0 += (VEC_INT)K_OFFSET; - acc1 += (VEC_INT)K_OFFSET; - -#endif // K_OFFSET != 0 - -#if defined(REAL_MULTIPLIER) - - acc0 = CONVERT(round(CONVERT(acc0, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - acc1 = CONVERT(round(CONVERT(acc1, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); - -#else // defined(REAL_MULTIPLIER) - -#if OUTPUT_SHIFT < 0 - acc0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); - acc1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); -#else // OUTPUT_SHIFT < 0 - acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); - acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); -#endif // OUTPUT_SHIFT < 0 - -#endif // defined(REAL_MULTIPLIER) - acc0 += (VEC_INT)OUTPUT_OFFSET; - acc1 += (VEC_INT)OUTPUT_OFFSET; - - VEC_TYPE(VEC_SIZE) - res0 = CONVERT_SAT(acc0, VEC_TYPE(VEC_SIZE)); - VEC_TYPE(VEC_SIZE) - res1 = CONVERT_SAT(acc1, VEC_TYPE(VEC_SIZE)); - -#if defined(DST_DEPTH) - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z + b * dst_stride_w; -#else /* defined(DST_DEPTH) */ - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z; -#endif /* defined(DST_DEPTH) */ - - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res0), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)); - VSTORE(VEC_SIZE) - (ACTIVATION_FUNC(res1), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)); -} -#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE==4 - -#endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) - #endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) #endif // defined(WEIGHTS_PROMOTED_TYPE) @@ -1612,7 +783,7 @@ __kernel void dwc_3x3_reshaped_quantized8_dot8_stride1_nhwc( #endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER)) #if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && 
defined(VEC_SIZE_LEFTOVER) -/** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped +/** This function computes the depthwise convolution for NHWC data layout. * * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2) * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1) diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp index 287a965f5b..dda70d2231 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.cpp @@ -114,20 +114,19 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, } std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, - unsigned int depth_multiplier, GPUTarget gpu_target, std::string &kernel_name, const Size2D dilation) + unsigned int depth_multiplier, std::string &kernel_name, const Size2D dilation) { // Output auto inizialitation if not yet initialized const ConvolutionInfo info { conv_info, depth_multiplier, ActivationLayerInfo(), dilation }; - const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, info); + const TensorShape output_shape = compute_depthwise_convolution_shape(*input, *weights, info); auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_quantization_info(output->quantization_info())); const unsigned int conv_stride_x = conv_info.stride().first; const unsigned int conv_stride_y = conv_info.stride().second; const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type()); - const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST; // Configure kernel window unsigned int num_elems_read_per_iteration_x = 0; @@ -156,31 +155,28 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen num_elems_read_per_iteration_x = 3 + (num_elems_written_per_iteration_x - 1) * conv_stride_x; break; } - if(is_bifrost) + if(conv_stride_x == 1 && conv_stride_y == 1) { - if(conv_stride_x == 1 && conv_stride_y == 1) - { - kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_bifrost_f16"; - num_elems_read_per_iteration_x = 8; - num_elems_written_per_iteration_x = 4; - num_elems_read_per_iteration_y = 6; - num_elems_written_per_iteration_y = 4; - } - else if(conv_stride_x == 2 && conv_stride_y == 2) - { - kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16"; - num_elems_read_per_iteration_x = 10; - num_elems_written_per_iteration_x = 4; - num_elems_read_per_iteration_y = 5; - num_elems_written_per_iteration_y = 2; - } + kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_f16"; + num_elems_read_per_iteration_x = 8; + num_elems_written_per_iteration_x = 4; + num_elems_read_per_iteration_y = 6; + num_elems_written_per_iteration_y = 4; + } + else if(conv_stride_x == 2 && conv_stride_y == 2) + { + kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_f16"; + num_elems_read_per_iteration_x = 10; + num_elems_written_per_iteration_x = 4; + num_elems_read_per_iteration_y = 5; + num_elems_written_per_iteration_y = 2; } } - else if(input->data_type() == DataType::F32 && is_bifrost) + else if(input->data_type() == DataType::F32) { if(conv_stride_x == 1 && conv_stride_y == 1) { - kernel_name = 
"depthwise_convolution_3x3_stridex1_stridey1_bifrost_f32"; + kernel_name = "depthwise_convolution_3x3_stridex1_stridey1_f32"; num_elems_read_per_iteration_x = 4; num_elems_read_per_iteration_y = 6; num_elems_written_per_iteration_x = 2; @@ -188,7 +184,7 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen } else if(conv_stride_x == 2 && conv_stride_y == 2) { - kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_bifrost_f32"; + kernel_name = "depthwise_convolution_3x3_stridex2_stridey2_f32"; num_elems_read_per_iteration_x = 6; num_elems_read_per_iteration_y = 5; num_elems_written_per_iteration_x = 2; @@ -239,7 +235,7 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen } // namespace CLDepthwiseConvolutionLayer3x3NCHWKernel::CLDepthwiseConvolutionLayer3x3NCHWKernel() - : _conv_stride_x(0), _conv_pad_top(0), _conv_pad_left(0) + : _border_size(0), _input(), _output(), _weights(), _biases(), _conv_stride_y(1), _output_multipliers(), _output_shifts(), _is_quantized(false), _conv_stride_x(0), _conv_pad_top(0), _conv_pad_left(0) { } @@ -278,10 +274,9 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const CLCompileContext _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type()); // Configure kernel window - std::string kernel_name; - const GPUTarget gpu_target = get_target(); + std::string kernel_name; - auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, gpu_target, kernel_name, dilation); + auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, depth_multiplier, kernel_name, dilation); ARM_COMPUTE_ERROR_THROW_ON(win_config.first); ICLKernel::configure_internal(win_config.second); @@ -372,13 +367,13 @@ void CLDepthwiseConvolutionLayer3x3NCHWKernel::configure(const CLCompileContext } Status CLDepthwiseConvolutionLayer3x3NCHWKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, GPUTarget gpu_target, + const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts) { std::string kernel_name; ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts)); ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), output->clone().get(), - conv_info, depth_multiplier, gpu_target, kernel_name, dilation) + conv_info, depth_multiplier, kernel_name, dilation) .first); return Status{}; diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h index 45b5869676..c4e475f6f2 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020 Arm Limited. + * Copyright (c) 2018-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,7 +24,7 @@ #ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H #define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNCHWKERNEL3x3_H -#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h" +#include "src/core/CL/ICLKernel.h" namespace arm_compute { @@ -32,11 +32,19 @@ class ICLTensor; /** Interface for the kernel to run a 3x3 depthwise convolution on a tensor when the data layout is NCHW. */ -class CLDepthwiseConvolutionLayer3x3NCHWKernel : public ICLDepthwiseConvolutionLayer3x3Kernel +class CLDepthwiseConvolutionLayer3x3NCHWKernel : public ICLKernel { public: /** Default constructor */ CLDepthwiseConvolutionLayer3x3NCHWKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLDepthwiseConvolutionLayer3x3NCHWKernel(const CLDepthwiseConvolutionLayer3x3NCHWKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLDepthwiseConvolutionLayer3x3NCHWKernel &operator=(const CLDepthwiseConvolutionLayer3x3NCHWKernel &) = delete; + /** Default Move Constructor. */ + CLDepthwiseConvolutionLayer3x3NCHWKernel(CLDepthwiseConvolutionLayer3x3NCHWKernel &&) = default; + /** Default move assignment operator */ + CLDepthwiseConvolutionLayer3x3NCHWKernel &operator=(CLDepthwiseConvolutionLayer3x3NCHWKernel &&) = default; /** Initialize the function's source, destination, conv and border_size. * * @param[in] input Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32. @@ -56,7 +64,7 @@ public: */ void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U), - const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override; + const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr); /** Initialize the function's source, destination, conv and border_size. * * @param[in] compile_context The compile context to be used. @@ -77,7 +85,7 @@ public: */ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U), - const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override; + const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr); /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NCHWKernel * * @param[in] input Source tensor info. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32. @@ -89,7 +97,6 @@ public: * @param[in] conv_info Padding and stride information to use for the convolution. * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1. * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported. - * @param[in] gpu_target (Optional) GPU target to validate the kernel for. Defaults to midgard. * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 
* @param[in] output_multipliers (Optional) Output multipliers tensor info for quantized computations. In case of per-channel quantization, * the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32 @@ -99,13 +106,23 @@ public: * @return a status */ static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, - unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), GPUTarget gpu_target = GPUTarget::MIDGARD, + unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U), const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr); void run(const Window &window, cl::CommandQueue &queue) override; BorderSize border_size() const override; private: + BorderSize _border_size; + const ICLTensor *_input; + ICLTensor *_output; + const ICLTensor *_weights; + const ICLTensor *_biases; + unsigned int _conv_stride_y; + const ICLTensor *_output_multipliers; + const ICLTensor *_output_shifts; + bool _is_quantized; + unsigned int _conv_stride_x; unsigned int _conv_pad_top; unsigned int _conv_pad_left; diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp index f7603e6397..2a1365e6e2 100644 --- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp +++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.cpp @@ -30,7 +30,6 @@ #include "arm_compute/core/TensorInfo.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/core/utils/quantization/AsymmHelpers.h" #include "src/core/AccessWindowStatic.h" #include "src/core/CL/CLValidate.h" #include "src/core/CL/ICLKernel.h" @@ -43,17 +42,11 @@ namespace arm_compute namespace { Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation, - const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts) + const PadStrideInfo &conv_info, unsigned int depth_multiplier, const ActivationLayerInfo &act_info, const Size2D &dilation) { + ARM_COMPUTE_UNUSED(act_info); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32, DataType::QASYMM8, DataType::QASYMM8_SIGNED); - ARM_COMPUTE_RETURN_ERROR_ON_MSG((act_info.enabled()) && (input->data_type() == DataType::QASYMM8 || input->data_type() == DataType::QASYMM8_SIGNED) - && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) - && (act_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU) - && (act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU) - && (act_info.activation() != ActivationLayerInfo::ActivationFunction::LOGISTIC), - "For QASYMM8 only logistic, relu, lower bounded relu and lower-upper bounded relu are supported"); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32); ARM_COMPUTE_RETURN_ERROR_ON(depth_multiplier > 1); // COMPMID-1071 Add depth multiplier support for NHWC ARM_COMPUTE_RETURN_ERROR_ON(conv_info.stride().first < 1); @@ -61,54 +54,21 @@ Status 
validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, ARM_COMPUTE_RETURN_ERROR_ON((dilation.x() < 1) || (dilation.y() < 1)); - const bool is_qasymm = is_data_type_quantized_asymmetric(input->data_type()); const size_t weights_width = 3; const size_t weights_height = 3; const ConvolutionInfo info{ conv_info, depth_multiplier, ActivationLayerInfo(), dilation }; - const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape( - *input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), info); - if(is_qasymm) - { - DepthwiseConvolutionReshapeInfo info; - info.c0 = 4; - ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(0) / info.c0) != weights_width * weights_height); - - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output_multipliers, output_shifts); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32); - ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1); - ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1); - - if(is_data_type_quantized_per_channel(weights->data_type())) - { - ARM_COMPUTE_RETURN_ERROR_ON(output_shape[0] != output_multipliers->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON(output_shape[0] != output_shifts->dimension(0)); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); - ARM_COMPUTE_RETURN_ERROR_ON(1 != output_multipliers->dimension(0)); - ARM_COMPUTE_RETURN_ERROR_ON(1 != output_shifts->dimension(0)); - } - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); - ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(1) != weights_width) || (weights->dimension(2) != weights_height)); - } + + const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_depthwise_convolution_shape( + *input, TensorInfo(TensorShape(weights_width, weights_height), 1, weights->data_type()).set_data_layout(DataLayout::NCHW), info); + + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights); + ARM_COMPUTE_RETURN_ERROR_ON((weights->dimension(1) != weights_width) || (weights->dimension(2) != weights_height)); if(biases != nullptr) { ARM_COMPUTE_RETURN_ERROR_ON(biases->dimension(0) != output_shape[0]); - if(is_qasymm) - { - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32); - } - else - { - ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); - } + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(weights, biases); ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1); } @@ -122,10 +82,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights, } std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation, - ITensorInfo *output_multipliers, ITensorInfo *output_shifts) + const PadStrideInfo &conv_info, unsigned int depth_multiplier, const Size2D &dilation) { - ARM_COMPUTE_UNUSED(weights); + ARM_COMPUTE_UNUSED(weights, bias); ARM_COMPUTE_UNUSED(depth_multiplier); const bool is_stride_1_dilation_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1) && dilation.x() == 1 && dilation.y() == 1); @@ -134,115 +93,46 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen Window 
win{}; Status err{}; - if(is_data_type_quantized_asymmetric(input->data_type())) - { - const unsigned int num_elems_accessed_per_iteration = 4; - const unsigned int num_rows_read_per_iteration = num_rows_processed_per_iteration + 2; - const unsigned int num_rows_written_per_iteration = std::ceil(num_rows_processed_per_iteration / static_cast(conv_info.stride().first)); - - BorderSize border_size; - border_size = BorderSize(conv_info.pad_left(), 0, std::max(std::max(conv_info.pad_right(), conv_info.pad_bottom()), conv_info.pad_top()), 0); - - // Configure kernel window - win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_written_per_iteration)); - - AccessWindowStatic input_access(input, 0, -border_size.top, ceil_to_multiple(input->dimension(0), num_elems_accessed_per_iteration), - ceil_to_multiple(input->dimension(1) + border_size.bottom, num_rows_read_per_iteration)); - AccessWindowRectangle output_access(output, 0, 0, num_elems_accessed_per_iteration, num_rows_written_per_iteration); - - bool window_changed = false; - - if((output_multipliers != nullptr) && (output_shifts != nullptr)) - { - AccessWindowHorizontal output_multipliers_access(output_multipliers, 0, num_elems_accessed_per_iteration); - AccessWindowHorizontal output_shifts_access(output_shifts, 0, num_elems_accessed_per_iteration); - window_changed = window_changed || update_window_and_padding(win, input_access, output_access, output_multipliers_access, output_shifts_access); - } - else - { - Status err = ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "output_multipliers and output_shifts must be non-nullptr for quantized input"); - return std::make_pair(err, win); - } - - if(bias != nullptr) - { - AccessWindowHorizontal bias_access(bias, 0, num_elems_accessed_per_iteration); - window_changed = window_changed || update_window_and_padding(win, bias_access); - } - - err = (window_changed) ? 
ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{}; - } - else - { - unsigned int num_elems_accessed_per_iteration = adjust_vec_size(4 / input->element_size(), input->dimension(0)); - win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_processed_per_iteration)); - } + unsigned int num_elems_accessed_per_iteration = adjust_vec_size(4 / input->element_size(), input->dimension(0)); + win = calculate_max_window(*output, Steps(num_elems_accessed_per_iteration, num_rows_processed_per_iteration)); return std::make_pair(err, win); } } // namespace CLDepthwiseConvolutionLayer3x3NHWCKernel::CLDepthwiseConvolutionLayer3x3NHWCKernel() - : _num_planes_processed_per_iteration(1) -{ -} - -BorderSize CLDepthwiseConvolutionLayer3x3NHWCKernel::border_size() const + : _input(), _output(), _weights(), _biases(), _num_planes_processed_per_iteration(1) { - return _border_size; } void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation, - const ICLTensor *output_multipliers, const ICLTensor *output_shifts) + const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation) { - configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts); + configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation); } void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation, - const ICLTensor *output_multipliers, const ICLTensor *output_shifts) + const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation) { ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output); ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), - conv_info, depth_multiplier, act_info, dilation, - (output_multipliers != nullptr) ? output_multipliers->info() : nullptr, - (output_shifts != nullptr) ? output_shifts->info() : nullptr)); + conv_info, depth_multiplier, act_info, dilation)); auto padding_info = get_padding_info({ input, weights, biases, output }); auto win_config = validate_and_configure_window(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), - conv_info, depth_multiplier, dilation, - (output_multipliers != nullptr) ? output_multipliers->info() : nullptr, - (output_shifts != nullptr) ? 
output_shifts->info() : nullptr); + conv_info, depth_multiplier, dilation); - const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1)); - const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1); - const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->info()->data_type()); - const bool is_dot8_supported = dot8_supported(CLKernelLibrary::get().get_device()) && !is_quantized_per_channel; + const bool is_stride_1 = ((conv_info.stride().first == conv_info.stride().second) && (conv_info.stride().first == 1)); + const bool is_stride_1_dilation_1 = (is_stride_1 && dilation.x() == 1 && dilation.y() == 1); _input = input; _output = output; _weights = weights; _biases = biases; - _conv_stride_y = conv_info.stride().second; _num_planes_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1; - _output_multipliers = output_multipliers; - _output_shifts = output_shifts; - _is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type()); - if(_is_quantized) - { - _border_size = BorderSize(input->info()->padding()); - - // If QASYMM8 and the 8 bit dot product is available, force _num_planes_processed_per_iteration to 1 - if(is_dot8_supported) - { - _num_planes_processed_per_iteration = 1; - } - } - - unsigned int num_elems_accessed_per_iteration = _is_quantized ? 4 : adjust_vec_size(4 / input->info()->element_size(), input->info()->dimension(0)); + unsigned int num_elems_accessed_per_iteration = adjust_vec_size(4 / input->info()->element_size(), input->info()->dimension(0)); unsigned int num_rows_processed_per_iteration = is_stride_1_dilation_1 ? 2 : 1; CLBuildOptions build_opts; @@ -257,54 +147,8 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext build_opts.add_option_if(_biases != nullptr, "-DHAS_BIAS"); build_opts.add_option_if(_input->info()->tensor_shape().total_size_upper(3) > 1, "-DDST_DEPTH=" + support::cpp11::to_string(static_cast(std::ceil(_output->info()->dimension(2) / static_cast(_num_planes_processed_per_iteration))))); - - if(_is_quantized) - { - const UniformQuantizationInfo iq_info = _input->info()->quantization_info().uniform(); - const UniformQuantizationInfo wq_info = _weights->info()->quantization_info().uniform(); - const UniformQuantizationInfo oq_info = _output->info()->quantization_info().uniform(); - - build_opts.add_option("-DSRC_DIM_1=" + support::cpp11::to_string(_input->info()->dimension(1))); - build_opts.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iq_info.offset)); - build_opts.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wq_info.offset)); - build_opts.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oq_info.offset)); - build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(9 * iq_info.offset * wq_info.offset)); - build_opts.add_option_if(is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION"); - build_opts.add_option_if(is_dot8_supported, "-DIS_DOT8"); - - // Compute non-per-channel multiplier and shift anyway to make OpenCL kernel simpler - float multiplier = iq_info.scale * wq_info.scale / oq_info.scale; - int output_multiplier = 0; - int output_shift = 0; - quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift); - build_opts.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier)); - build_opts.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift)); - - if(act_info.enabled()) - 
{ - int a_val{}; - int b_val{}; - std::tie(b_val, a_val) = get_quantized_activation_min_max(act_info, input->info()->data_type(), oq_info); - - const int o1 = oq_info.offset; - - build_opts.add_option("-DA_VAL=" + support::cpp11::to_string(a_val)); - build_opts.add_option("-DB_VAL=" + support::cpp11::to_string(b_val)); - build_opts.add_option("-DCONST_0=" + support::cpp11::to_string(o1)); - - const float s1 = iq_info.scale; - build_opts.add_option("-DS1_VAL=" + float_to_string_with_full_precision(s1)); - build_opts.add_option("-DO1_VAL=" + support::cpp11::to_string(o1)); - } - - build_opts.add_option("-DWEIGHTS_TYPE=" + get_cl_type_from_data_type(weights->info()->data_type())); - build_opts.add_option("-DWEIGHTS_PROMOTED_TYPE=" + get_cl_promoted_type_from_data_type(weights->info()->data_type())); - } - else - { - build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a())); - build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b())); - } + build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a())); + build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b())); if(is_stride_1_dilation_1) { @@ -317,30 +161,20 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext else { build_opts.add_option("-DCONV_STRIDE_X=" + support::cpp11::to_string(conv_info.stride().first)); - build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(_conv_stride_y)); + build_opts.add_option("-DCONV_STRIDE_Y=" + support::cpp11::to_string(conv_info.stride().second)); build_opts.add_option("-DDILATION_X=" + support::cpp11::to_string(dilation.x())); build_opts.add_option("-DDILATION_Y=" + support::cpp11::to_string(dilation.y())); } - std::string kernel_name; // Create kernel - if(_is_quantized) - { - kernel_name = std::string("dwc_3x3_reshaped_quantized8"); - kernel_name += (is_dot8_supported && is_stride_1_dilation_1 ? "_dot8" : ""); - kernel_name += (is_stride_1_dilation_1 ? "_stride1" : ""); - kernel_name += "_nhwc"; - } - else - { - kernel_name = std::string("depthwise_convolution_3x3_nhwc"); - kernel_name += (is_stride_1_dilation_1 ? "_stride1" : ""); - } + std::string kernel_name; + kernel_name = std::string("depthwise_convolution_3x3_nhwc"); + kernel_name += (is_stride_1_dilation_1 ? 
"_stride1" : ""); ICLKernel::configure_internal(win_config.second); _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); - ARM_COMPUTE_ERROR_ON(!_is_quantized && has_padding_changed(padding_info)); + ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info)); // Set config_id for enabling LWS tuning _config_id = kernel_name; @@ -359,15 +193,12 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::configure(const CLCompileContext } Status CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, - const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation, - const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts) + const PadStrideInfo &conv_info, unsigned int depth_multiplier, ActivationLayerInfo act_info, const Size2D &dilation) { - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation, output_multipliers, output_shifts)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, weights, biases, output, conv_info, depth_multiplier, act_info, dilation)); ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), weights->clone().get(), biases != nullptr ? biases->clone().get() : nullptr, - output->clone().get(), conv_info, depth_multiplier, dilation, - (output_multipliers != nullptr) ? output_multipliers->clone().get() : nullptr, - (output_shifts != nullptr) ? output_shifts->clone().get() : nullptr) + output->clone().get(), conv_info, depth_multiplier, dilation) .first); return Status{}; } @@ -382,16 +213,7 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::Com Window win = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); win.set(Window::DimZ, Window::Dimension(0, std::ceil(_output->info()->dimension(2) / static_cast(_num_planes_processed_per_iteration)) * total_batches, 1)); - unsigned int idx = 2 * num_arguments_per_4D_tensor() + (_is_quantized ? num_arguments_per_2D_tensor() : num_arguments_per_3D_tensor()); - - if(_is_quantized) - { - Window slice; - slice.use_tensor_dimensions(_output_multipliers->info()->tensor_shape()); - slice.set_dimension_step(Window::DimX, window.x().step()); - add_1D_tensor_argument(idx, _output_multipliers, slice); - add_1D_tensor_argument(idx, _output_shifts, slice); - } + unsigned int idx = 2 * num_arguments_per_4D_tensor() + num_arguments_per_3D_tensor(); if(_biases != nullptr) { @@ -401,62 +223,14 @@ void CLDepthwiseConvolutionLayer3x3NHWCKernel::run(const Window &window, cl::Com add_1D_tensor_argument(idx, _biases, win_biases); } - if(_is_quantized) - { - // Calculate the max_offset. 
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
index ce0bf5ceb3..ee47d98807 100644
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
+++ b/src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,7 @@
 #ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H
 #define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONNHWCKERNEL3x3_H
 
-#include "src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h"
+#include "src/core/CL/ICLKernel.h"
 
 namespace arm_compute
 {
@@ -32,81 +32,78 @@ class ICLTensor;
 
 /** Interface for the kernel to run a 3x3 depthwise convolution on a tensor when the data layout is NHWC.
  */
-class CLDepthwiseConvolutionLayer3x3NHWCKernel : public ICLDepthwiseConvolutionLayer3x3Kernel
+class CLDepthwiseConvolutionLayer3x3NHWCKernel : public ICLKernel
 {
 public:
     /** Default constructor */
     CLDepthwiseConvolutionLayer3x3NHWCKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDepthwiseConvolutionLayer3x3NHWCKernel(const CLDepthwiseConvolutionLayer3x3NHWCKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDepthwiseConvolutionLayer3x3NHWCKernel &operator=(const CLDepthwiseConvolutionLayer3x3NHWCKernel &) = delete;
+    /** Default Move Constructor. */
+    CLDepthwiseConvolutionLayer3x3NHWCKernel(CLDepthwiseConvolutionLayer3x3NHWCKernel &&) = default;
+    /** Default move assignment operator */
+    CLDepthwiseConvolutionLayer3x3NHWCKernel &operator=(CLDepthwiseConvolutionLayer3x3NHWCKernel &&) = default;
     /** Initialize the function's source, destination, conv and border_size.
      *
-     * @param[in]  input              Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
-     *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
-     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                                Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
-     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  act_info           (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+     * @param[in]  input            Source tensor. DataType supported: F16/F32.
+     * @param[in]  weights          Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
+     *                              Data type supported: Same as @p input.
+     * @param[in]  biases           Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
+     *                              Data type supported: Same as @p input.
+     * @param[out] output           Destination tensor. Data type supported: Same as @p input.
+     * @param[in]  conv_info        Padding and stride information to use for the convolution.
+     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
+     * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      */
     void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
-                   unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
-                   const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
+                   unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
     /** Initialize the function's source, destination, conv and border_size.
      *
-     * @param[in]  compile_context    The compile context to be used.
-     * @param[in]  input              Source tensor. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
-     *                                Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
-     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                                Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
-     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  act_info           (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+     * @param[in]  compile_context  The compile context to be used.
+     * @param[in]  input            Source tensor. DataType supported: F16/F32.
+     * @param[in]  weights          Weights tensor. A 3D tensor with dimensions [IFM, 3, 3].
+     *                              Data type supported: Same as @p input.
+     * @param[in]  biases           Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
+     *                              Data type supported: Same as @p input.
+     * @param[out] output           Destination tensor. Data type supported: Same as @p input.
+     * @param[in]  conv_info        Padding and stride information to use for the convolution.
+     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
+     * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      */
     void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
-                   unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
-                   const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) override;
+                   unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
     /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
      *
-     * @param[in] input              Source tensor info. DataType supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
-     * @param[in] weights            Weights tensor info. A 3D tensor with dimensions [IFM, 3, 3].
-     *                               Data type supported: Same as @p input or QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL when @p input is QASYMM8/QASYMM8_SIGNED.
-     * @param[in] biases             Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                               Data type supported: Same as @p input, S32 when input is QASYMM8/QASYMM8_SIGNED.
-     * @param[in] output             Destination tensor info. Data type supported: Same as @p input.
-     * @param[in] conv_info          Padding and stride information to use for the convolution.
-     * @param[in] depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in] act_info           (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
-     * @param[in] dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in] output_multipliers (Optional) Output multipliers tensor info for quantized computations. In case of per-channel quantization,
-     *                               the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in] output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                               the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
+     * @param[in] input            Source tensor info. DataType supported: F16/F32.
+     * @param[in] weights          Weights tensor info. A 3D tensor with dimensions [IFM, 3, 3].
+     *                             Data type supported: Same as @p input.
+     * @param[in] biases           Biases tensor info. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
+     *                             Data type supported: Same as @p input.
+     * @param[in] output           Destination tensor info. Data type supported: Same as @p input.
+     * @param[in] conv_info        Padding and stride information to use for the convolution.
+     * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
+     * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
-                           unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
-                           const ITensorInfo *output_multipliers = nullptr, const ITensorInfo *output_shifts = nullptr);
+                           unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U));
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
-    BorderSize border_size() const override;
 
 private:
+    const ICLTensor *_input;
+    ICLTensor       *_output;
+    const ICLTensor *_weights;
+    const ICLTensor *_biases;
+    unsigned int     _num_planes_processed_per_iteration;
 };
 } // namespace arm_compute
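
Note: a minimal sketch of how a caller exercises the slimmed-down F16/F32 interface: validate against ITensorInfo descriptors first, then configure a kernel with matching CL tensors. The shapes are illustrative only, and the usual arm_compute headers are assumed.

    // NHWC shapes in ACL [C, W, H] order: a 32-channel 56x56 input and 3x3 depthwise weights.
    TensorInfo input_info(TensorShape(32U, 56U, 56U), 1, DataType::F32);
    TensorInfo weights_info(TensorShape(32U, 3U, 3U), 1, DataType::F32);
    TensorInfo biases_info(TensorShape(32U), 1, DataType::F32);
    TensorInfo output_info(TensorShape(32U, 56U, 56U), 1, DataType::F32);
    input_info.set_data_layout(DataLayout::NHWC);
    weights_info.set_data_layout(DataLayout::NHWC);
    output_info.set_data_layout(DataLayout::NHWC);

    const PadStrideInfo conv_info(1, 1, 1, 1); // stride (1, 1), padding (1, 1)
    const Status status = CLDepthwiseConvolutionLayer3x3NHWCKernel::validate(&input_info, &weights_info, &biases_info, &output_info, conv_info);
    // If status reports no error, configure() can be called on a kernel instance
    // with CLTensor objects initialised from the same infos.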
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
deleted file mode 100644
index 386d634cea..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2019-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    const size_t idx_w = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::WIDTH);
-    const size_t idx_h = get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::HEIGHT);
-
-    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(input, DataLayout::NHWC);
-    ARM_COMPUTE_RETURN_ERROR_ON(info.c0 != 4);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_h) != 3);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(idx_w) != 3);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-
-    if(output->total_size() != 0)
-    {
-        auto reshaped_weights_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*input, info);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), reshaped_weights_shape);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-    }
-
-    return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
-{
-    auto reshaped_input_shape = arm_compute::misc::shape_calculator::compute_reshaped_depthwise_weights_shape(*input, info);
-    auto_init_if_empty(*output, reshaped_input_shape, 1, input->data_type(), input->quantization_info());
-
-    Window win = calculate_max_window(*input, Steps(info.c0));
-    AccessWindowHorizontal weights_access(input, 0, info.c0);
-    const bool window_changed = update_window_and_padding(win, weights_access);
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
-}
-} // namespace
-
-CLDepthwiseConvolutionLayerReshapeWeightsKernel::CLDepthwiseConvolutionLayerReshapeWeightsKernel()
-    : _input(nullptr), _output(nullptr)
-{
-}
-
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
-{
-    configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
-}
-
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), info));
-    auto win_config = validate_and_configure_window(input->info(), output->info(), info);
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-
-    ICLKernel::configure_internal(win_config.second);
-
-    _input  = input;
-    _output = output;
-
-    // Build the kernel
-    CLBuildOptions build_opts;
-    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(info.c0));
-    build_opts.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(0)));
-    build_opts.add_option_if(info.transpose, "-DTRANSPOSE");
-    build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(input->info()->element_size()));
-
-    _kernel = create_kernel(compile_context, "depthwise_convolution_reshape_weights", build_opts.options());
-}
-
-Status CLDepthwiseConvolutionLayerReshapeWeightsKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), info).first);
-    return Status{};
-}
-
-void CLDepthwiseConvolutionLayerReshapeWeightsKernel::run(const Window &window, cl::CommandQueue &queue)
-{
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-
-    unsigned int idx = 0;
-    add_3D_tensor_argument(idx, _input, window);
-    add_2D_tensor_argument(idx, _output, window);
-    enqueue(queue, *this, window, lws_hint());
-}
-} // namespace arm_compute
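
Note: the kernel deleted above produced weights of shape [W*H*C0, ceil(IFM/C0)], packing C0 = 4 channels per block (the only C0 its validate_arguments() accepted). A worked sketch of that shape computation, with a sample channel count chosen for illustration:

    // 3x3 depthwise weights, IFM = 30 channels, C0 = 4 channels read per thread.
    constexpr unsigned int W   = 3;
    constexpr unsigned int H   = 3;
    constexpr unsigned int C0  = 4;
    constexpr unsigned int IFM = 30;

    constexpr unsigned int dst_width  = W * H * C0;          // 36
    constexpr unsigned int dst_height = (IFM + C0 - 1) / C0; // ceil(30 / 4) = 8
    static_assert(dst_width == 36 && dst_height == 8, "reshaped weights are [36, 8]");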
diff --git a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h b/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
deleted file mode 100644
index 650fe9a11b..0000000000
--- a/src/core/CL/kernels/CLDepthwiseConvolutionLayerReshapeWeightsKernel.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSKERNEL_H
-#define ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSKERNEL_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to reshape the weights of depthwise convolution. */
-class CLDepthwiseConvolutionLayerReshapeWeightsKernel : public ICLKernel
-{
-public:
-    /** Default constructor */
-    CLDepthwiseConvolutionLayerReshapeWeightsKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLDepthwiseConvolutionLayerReshapeWeightsKernel(const CLDepthwiseConvolutionLayerReshapeWeightsKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLDepthwiseConvolutionLayerReshapeWeightsKernel &operator=(const CLDepthwiseConvolutionLayerReshapeWeightsKernel &) = delete;
-    /** Default Move Constructor. */
-    CLDepthwiseConvolutionLayerReshapeWeightsKernel(CLDepthwiseConvolutionLayerReshapeWeightsKernel &&) = default;
-    /** Default move assignment operator */
-    CLDepthwiseConvolutionLayerReshapeWeightsKernel &operator=(CLDepthwiseConvolutionLayerReshapeWeightsKernel &&) = default;
-
-    /** Initialize the function's source and destination.
-     *
-     * @param[in]  input  The input tensor of dimension [IFM, W, H]. Data types supported: All. Data layouts supported: NHWC
-     * @param[out] output The output tensor of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
-     * @param[in]  info   Depthwise convolution information to reshape the input tensor.
-     */
-    void configure(const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
-    /** Initialize the function's source and destination.
-     *
-     * @param[in]  compile_context The compile context to be used.
-     * @param[in]  input           The input tensor of dimension [IFM, W, H]. Data types supported: All. Data layouts supported: NHWC
-     * @param[out] output          The output tensor of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
-     * @param[in]  info            Depthwise convolution information to reshape the input tensor.
-     */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, const DepthwiseConvolutionReshapeInfo &info);
-
-    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthwiseConvolutionLayer3x3NHWCKernel
-     *
-     * @param[in] input  The input tensor info of dimension [IFM, W, H]. Data types supported: All. Data layouts supported: NHWC
-     * @param[in] output The output tensor info of dimension [W*H*C0, ceil(IFM/C0)]. C0 is the number of channels read by each thread. Data types supported: same as @p weights.
-     * @param[in] info   Depthwise convolution information to reshape the input tensor.
-     *
-     * @return a Status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const DepthwiseConvolutionReshapeInfo &info);
-
-    // Inherited methods overridden:
-    void run(const Window &window, cl::CommandQueue &queue) override;
-
-private:
-    const ICLTensor *_input;
-    ICLTensor       *_output;
-
-    void configure_dot_product(const DepthwiseConvolutionReshapeInfo &info);
-    void configure_generic(const DepthwiseConvolutionReshapeInfo &info);
-};
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_CLDEPTHWISECONVOLUTIONLAYERRESHAPEWEIGHTSKERNEL_H */
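
Note: callers drove the kernel removed above through a DepthwiseConvolutionReshapeInfo; a sketch of the typical setup follows. Only the c0 and transpose fields appear in the deleted code, so anything beyond them (and the commented configure call) is an assumption of the sketch.

    DepthwiseConvolutionReshapeInfo info;
    info.c0        = 4;    // the only block size validate_arguments() accepted
    info.transpose = true; // adds -DTRANSPOSE to the OpenCL build options

    CLDepthwiseConvolutionLayerReshapeWeightsKernel reshape_kernel;
    // reshape_kernel.configure(&weights, &reshaped_weights, info); // ICLTensor arguments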
diff --git a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
index d9f293ba73..c688951d57 100644
--- a/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
+++ b/src/core/CL/kernels/CLL2NormalizeLayerKernel.cpp
@@ -36,8 +36,6 @@
 
 #include "support/StringSupport.h"
 
-#include "utils/TypePrinter.h"
-
 namespace arm_compute
 {
 namespace
diff --git a/src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h b/src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
deleted file mode 100644
index 4c92ae417f..0000000000
--- a/src/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_ICLDEPTHWISECONVOLUTIONKERNEL3x3_H
-#define ARM_COMPUTE_ICLDEPTHWISECONVOLUTIONKERNEL3x3_H
-
-#include "src/core/CL/ICLKernel.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the kernel to run a 3x3 depthwise convolution on a tensor.
- */
-class ICLDepthwiseConvolutionLayer3x3Kernel : public ICLKernel
-{
-public:
-    /** Default constructor */
-    ICLDepthwiseConvolutionLayer3x3Kernel()
-        : _border_size(0), _input(), _output(), _weights(), _biases(), _conv_stride_y(1), _output_multipliers(), _output_shifts(), _is_quantized(false)
-    {
-    }
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    ICLDepthwiseConvolutionLayer3x3Kernel(const ICLDepthwiseConvolutionLayer3x3Kernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    ICLDepthwiseConvolutionLayer3x3Kernel &operator=(const ICLDepthwiseConvolutionLayer3x3Kernel &) = delete;
-    /** Default Move Constructor. */
-    ICLDepthwiseConvolutionLayer3x3Kernel(ICLDepthwiseConvolutionLayer3x3Kernel &&) = default;
-    /** Default move assignment operator */
-    ICLDepthwiseConvolutionLayer3x3Kernel &operator=(ICLDepthwiseConvolutionLayer3x3Kernel &&) = default;
-    /** Initialize the function's source, destination, conv and border_size.
-     *
-     * @param[in]  input              Source tensor. DataType supported: QASYMM8/F16/F32.
-     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
-     *                                Data type supported: Same as @p input, QASYMM8/QSYMM8_PER_CHANNEL when input is QASYMM8.
-     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                                Data type supported: Same as @p input, S32 when input is QASYMM8.
-     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  act_info           (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported for QASYMM8.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     */
-    virtual void configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
-                           unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
-                           const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) = 0;
-    /** Initialize the function's source, destination, conv and border_size.
-     *
-     * @param[in]  compile_context    The compile context to be used.
-     * @param[in]  input              Source tensor. DataType supported: QASYMM8/F16/F32.
-     * @param[in]  weights            Weights tensor. A 3D tensor with dimensions [3, 3, IFM].
-     *                                Data type supported: Same as @p input, QASYMM8/QSYMM8_PER_CHANNEL when input is QASYMM8.
-     * @param[in]  biases             Biases tensor. A 1D tensor with dimensions [IFM]. Must be nullptr if not needed.
-     *                                Data type supported: Same as @p input, S32 when input is QASYMM8.
-     * @param[out] output             Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  conv_info          Padding and stride information to use for the convolution.
-     * @param[in]  depth_multiplier   (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
-     * @param[in]  act_info           (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported for QASYMM8.
-     * @param[in]  dilation           (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
-     * @param[in]  output_multipliers (Optional) Output multipliers tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     * @param[in]  output_shifts      (Optional) Output shifts tensor for quantized computations. In case of per-channel quantization,
-     *                                the number of multipliers must be equal to the number of filters (IFM). Supported data types: S32
-     */
-    virtual void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info,
-                           unsigned int depth_multiplier = 1, ActivationLayerInfo act_info = ActivationLayerInfo(), const Size2D &dilation = Size2D(1U, 1U),
-                           const ICLTensor *output_multipliers = nullptr, const ICLTensor *output_shifts = nullptr) = 0;
-
-protected:
-    BorderSize       _border_size;
-    const ICLTensor *_input;
-    ICLTensor       *_output;
-    const ICLTensor *_weights;
-    const ICLTensor *_biases;
-    unsigned int     _conv_stride_y;
-    const ICLTensor *_output_multipliers;
-    const ICLTensor *_output_shifts;
-    bool             _is_quantized;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_ICLDEPTHWISECONVOLUTIONKERNEL3x3_H */
--
cgit v1.2.1