From d7295b7079f6b9126596cea998146ca9c6e87706 Mon Sep 17 00:00:00 2001
From: Dmitry Savenko
Date: Mon, 20 Nov 2017 22:00:08 +0700
Subject: COMPMID-661: Add QASYMM8 support (and basic tests) to
 CLDepthwiseConvolution3x3 kernel (#28)

Change-Id: I51bebe74e3814c1245812ad575fe7854d460674f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/109864
Reviewed-by: Anthony Barbier
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com
---
 .../cl_kernels/depthwise_convolution_quantized.cl | 258 +++++++++++++++++++++
 1 file changed, 258 insertions(+)
 create mode 100644 src/core/CL/cl_kernels/depthwise_convolution_quantized.cl

diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
new file mode 100644
index 0000000000..19a509bd0a
--- /dev/null
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers_asymm.h"
+
+#if defined(CONV_STRIDE_X)
+
+#if CONV_STRIDE_X == 1
+#define convolution1x3 convolution1x3_stride_1
+#elif CONV_STRIDE_X == 2
+#define convolution1x3 convolution1x3_stride_2
+#elif CONV_STRIDE_X == 3
+#define convolution1x3 convolution1x3_stride_3
+#else /* CONV_STRIDE_X */
+#error "Stride not supported"
+#endif /* CONV_STRIDE_X */
+
+/** Compute a 1D horizontal convolution of size 3 and stride 1 for uchar type.
+ *
+ * @param[in] left_pixel    Pointer to the left pixel.
+ * @param[in] left_coeff    Weight of the left pixel
+ * @param[in] middle_coeff  Weight of the middle pixel
+ * @param[in] right_coeff   Weight of the right pixel
+ * @param[in] input_offset  Quantized offset of the zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of the zero point of the weights tensor data range
+ *
+ * @return an int2 containing 2 convolved values.
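+ *
+ * @note input_offset and weight_offset are expected to hold the negated
+ *       zero points of their quantization ranges, so that (pixel + input_offset)
+ *       and (coeff + weight_offset) recover the signed integer values
+ *       (pixel - zero_point) and (coeff - zero_point) of the asymmetric
+ *       quantization scheme.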
+ */
+inline int2 convolution1x3_stride_1(__global const uchar *left_pixel,
+                                    const int             left_coeff,
+                                    const int             middle_coeff,
+                                    const int             right_coeff,
+                                    const int             input_offset,
+                                    const int             weight_offset)
+{
+    // Two outputs at stride 1 need 4 consecutive input pixels.
+    int4 temp = CONVERT(vload4(0, left_pixel), int4);
+
+    int2 left   = CONVERT(temp.s01, int2);
+    int2 middle = CONVERT(temp.s12, int2);
+    int2 right  = CONVERT(temp.s23, int2);
+
+    return (left + input_offset) * (int2)(left_coeff + weight_offset) + (middle + input_offset) * (int2)(middle_coeff + weight_offset) + (right + input_offset) * (int2)(right_coeff + weight_offset);
+}
+
+/** Compute a 1D horizontal convolution of size 3 and stride 2 for uchar type.
+ *
+ * @param[in] left_pixel    Pointer to the left pixel.
+ * @param[in] left_coeff    Weight of the left pixel
+ * @param[in] middle_coeff  Weight of the middle pixel
+ * @param[in] right_coeff   Weight of the right pixel
+ * @param[in] input_offset  Quantized offset of the zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of the zero point of the weights tensor data range
+ *
+ * @return an int2 containing 2 convolved values.
+ */
+inline int2 convolution1x3_stride_2(__global const uchar *left_pixel,
+                                    const int             left_coeff,
+                                    const int             middle_coeff,
+                                    const int             right_coeff,
+                                    const int             input_offset,
+                                    const int             weight_offset)
+{
+    // Two outputs at stride 2 need 5 input pixels: 4 from vload4 plus 1 extra scalar load.
+    int4 temp0 = CONVERT(vload4(0, left_pixel), int4);
+    int  temp1 = CONVERT(*(left_pixel + 4 * sizeof(uchar)), int);
+
+    int2 left   = CONVERT(temp0.s02, int2);
+    int2 middle = CONVERT(temp0.s13, int2);
+    int2 right  = CONVERT((int2)(temp0.s2, temp1), int2);
+
+    return (left + input_offset) * (int2)(left_coeff + weight_offset) + (middle + input_offset) * (int2)(middle_coeff + weight_offset) + (right + input_offset) * (int2)(right_coeff + weight_offset);
+}
+
+/** Compute a 1D horizontal convolution of size 3 and stride 3 for uchar type.
+ *
+ * @param[in] left_pixel    Pointer to the left pixel.
+ * @param[in] left_coeff    Weight of the left pixel
+ * @param[in] middle_coeff  Weight of the middle pixel
+ * @param[in] right_coeff   Weight of the right pixel
+ * @param[in] input_offset  Quantized offset of the zero point of the input tensor data range
+ * @param[in] weight_offset Quantized offset of the zero point of the weights tensor data range
+ *
+ * @return an int2 containing 2 convolved values.
+ */
+inline int2 convolution1x3_stride_3(__global const uchar *left_pixel,
+                                    const int             left_coeff,
+                                    const int             middle_coeff,
+                                    const int             right_coeff,
+                                    const int             input_offset,
+                                    const int             weight_offset)
+{
+    // Two outputs at stride 3 need 6 input pixels: 4 from vload4 plus 2 from vload2.
+    int4 temp0 = CONVERT(vload4(0, left_pixel), int4);
+    int2 temp1 = CONVERT(vload2(0, (left_pixel + 4 * sizeof(uchar))), int2);
+
+    int2 left   = CONVERT(temp0.s03, int2);
+    int2 middle = CONVERT((int2)(temp0.s1, temp1.s0), int2);
+    int2 right  = CONVERT((int2)(temp0.s2, temp1.s1), int2);
+
+    return (left + input_offset) * (int2)(left_coeff + weight_offset) + (middle + input_offset) * (int2)(middle_coeff + weight_offset) + (right + input_offset) * (int2)(right_coeff + weight_offset);
+}
+
+/** Apply a 3x3 convolution matrix to a single-channel QASYMM8 input image and return the result.
+ *
+ * Convolution matrix layout:
+ *
+ * [ mat0, mat1, mat2 ]\n
+ * [ mat3, mat4, mat5 ]\n
+ * [ mat6, mat7, mat8 ]\n
+ *
+ * @param[in] src               A pointer to source Image structure
+ * @param[in] mat0              Coefficient from the convolution matrix
+ * @param[in] mat1              Coefficient from the convolution matrix
+ * @param[in] mat2              Coefficient from the convolution matrix
+ * @param[in] mat3              Coefficient from the convolution matrix
+ * @param[in] mat4              Coefficient from the convolution matrix
+ * @param[in] mat5              Coefficient from the convolution matrix
+ * @param[in] mat6              Coefficient from the convolution matrix
+ * @param[in] mat7              Coefficient from the convolution matrix
+ * @param[in] mat8              Coefficient from the convolution matrix
+ * @param[in] input_offset      Quantized offset of the zero point of the input tensor data range
+ * @param[in] weight_offset     Quantized offset of the zero point of the weights tensor data range
+ * @param[in] output_offset     Quantized offset of the zero point of the output tensor data range
+ * @param[in] output_multiplier Output scale multiplier
+ * @param[in] output_shift      Output scale divisor exponent
+ * @param[in] bias              (Optional) Bias value
+ *
+ * @return a uchar2 containing 2 convolved values.
+ */
+inline uchar2 convolution3x3(
+    Image *src,
+    const uchar mat0, const uchar mat1, const uchar mat2,
+    const uchar mat3, const uchar mat4, const uchar mat5,
+    const uchar mat6, const uchar mat7, const uchar mat8,
+    const int input_offset, const int weight_offset, const int output_offset,
+    const int output_multiplier, const int output_shift
+#if defined(HAS_BIAS)
+    ,
+    const int bias
+#endif //defined(HAS_BIAS)
+)
+{
+    int2 pixels;
+
+    // Accumulate the three 1x3 row convolutions in 32-bit integer arithmetic.
+    pixels = convolution1x3(offset(src, 0, 0), mat0, mat1, mat2, input_offset, weight_offset);
+    pixels += convolution1x3(offset(src, 0, 1), mat3, mat4, mat5, input_offset, weight_offset);
+    pixels += convolution1x3(offset(src, 0, 2), mat6, mat7, mat8, input_offset, weight_offset);
+#if defined(HAS_BIAS)
+    pixels += (int2)(bias);
+#endif //defined(HAS_BIAS)
+
+    // Requantize: scale by the fixed-point output multiplier, add the output
+    // zero-point offset and saturate to the QASYMM8 range [0, 255].
+    pixels = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(pixels, output_multiplier, output_shift, 2);
+    pixels = pixels + output_offset;
+    pixels = clamp(pixels, 0, 255);
+
+    return CONVERT(pixels, uchar2);
+}
+
+/** This function computes a depthwise convolution of a QASYMM8 input tensor with a 3x3 filter.
+ *
+ * @note The convolution stride along x must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1).
+ *
+ * @param[in] src_ptr                               Pointer to the source image. Supported data types: QASYMM8
+ * @param[in] src_stride_x                          Stride of the source image in X dimension (in bytes)
+ * @param[in] src_step_x                            src_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in] src_stride_y                          Stride of the source image in Y dimension (in bytes)
+ * @param[in] src_step_y                            src_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in] src_stride_z                          Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z                            src_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in] src_offset_first_element_in_bytes     The offset of the first element in the source image
+ * @param[in] dst_ptr                               Pointer to the destination tensor. Supported data types: QASYMM8
+ * @param[in] dst_stride_x                          Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x                            dst_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in] dst_stride_y                          Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y                            dst_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in] dst_stride_z                          Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z                            dst_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in] dst_offset_first_element_in_bytes     The offset of the first element in the destination tensor
+ * @param[in] weights_ptr                           Pointer to the weights tensor. Supported data types: QASYMM8
+ * @param[in] weights_stride_x                      Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x                        weights_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in] weights_stride_y                      Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y                        weights_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in] weights_stride_z                      Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z                        weights_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr                            (Optional) Pointer to the biases vector. Supported data types: S32
+ * @param[in] biases_stride_x                       (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x                         (Optional) biases_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in] biases_offset_first_element_in_bytes  (Optional) The offset of the first element in the biases vector
+ * @param[in] input_offset                          Quantized offset of the zero point of the input tensor data range
+ * @param[in] weight_offset                         Quantized offset of the zero point of the weights tensor data range
+ * @param[in] output_offset                         Quantized offset of the zero point of the output tensor data range
+ * @param[in] output_multiplier                     Output scale multiplier
+ * @param[in] output_shift                          Output scale divisor exponent
+ */
+__kernel void depthwise_convolution_3x3_quantized(
+    TENSOR3D_DECLARATION(src),
+    TENSOR3D_DECLARATION(dst),
+    TENSOR3D_DECLARATION(weights),
+#if defined(HAS_BIAS)
+    VECTOR_DECLARATION(biases),
+#endif //defined(HAS_BIAS)
+    int input_offset,
+    int weight_offset,
+    int output_offset,
+    int output_multiplier,
+    int output_shift)
+{
+    Image    src     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src);
+    Image    dst     = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
+    Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT(weights);
+#if defined(HAS_BIAS)
+    Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+#endif //defined(HAS_BIAS)
+
+    // Byte offsets of the three weight rows; assumes weights_stride_y fits in a uchar.
+    uchar3 offset          = (uchar3)(0, 1, 2) * (uchar3)weights_stride_y;
+    uchar3 weights_values0 = vload3(0, weights.ptr + offset.s0);
+    uchar3 weights_values1 = vload3(0, weights.ptr + offset.s1);
+    uchar3 weights_values2 = vload3(0, weights.ptr + offset.s2);
+
+#if defined(HAS_BIAS)
+    // One S32 bias value per output channel (z dimension).
+    int bias_value = *((__global int *)(vector_offset(&biases, get_global_id(2))));
+#endif //defined(HAS_BIAS)
+
+    uchar2 pixels = convolution3x3(&src, weights_values0.s0, weights_values0.s1, weights_values0.s2,
+                                   weights_values1.s0, weights_values1.s1, weights_values1.s2,
+                                   weights_values2.s0, weights_values2.s1, weights_values2.s2,
+                                   input_offset, weight_offset, output_offset,
+                                   output_multiplier, output_shift
+#if defined(HAS_BIAS)
+                                   ,
+                                   bias_value
+#endif //defined(HAS_BIAS)
+                                  );
+
+    vstore2(pixels, 0, dst.ptr);
+}
+
+#endif //defined(CONV_STRIDE_X)
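For reference, output_multiplier and output_shift together encode the real
requantization scale M = (input_scale * weights_scale) / output_scale, which
must lie in (0, 1) for ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE to be
applicable. A minimal host-side sketch of the usual gemmlowp-style
decomposition is given below; the helper name is illustrative and not part of
this patch.

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    /* Decompose a real multiplier M in (0, 1) into a Q31 fixed-point
     * multiplier and a right-shift amount, so that
     * M ~= (quantized_multiplier / 2^31) * 2^(-right_shift). */
    static void quantize_multiplier_smaller_than_one(double   real_multiplier,
                                                     int32_t *quantized_multiplier,
                                                     int32_t *right_shift)
    {
        assert(real_multiplier > 0.0 && real_multiplier < 1.0);
        int32_t shift = 0;
        /* Normalize into [0.5, 1.0) so the Q31 value uses full precision. */
        while (real_multiplier < 0.5)
        {
            real_multiplier *= 2.0;
            ++shift;
        }
        int64_t q = (int64_t)llround(real_multiplier * (double)(1ll << 31));
        /* Rounding can push q to exactly 2^31; renormalize if it does. */
        if (q == (1ll << 31))
        {
            q /= 2;
            --shift;
        }
        assert(shift >= 0 && q <= INT32_MAX);
        *quantized_multiplier = (int32_t)q;
        *right_shift          = shift;
    }

The decomposed values are passed as the output_multiplier and output_shift
kernel arguments, and the kernel itself is compiled with the stride baked in,
e.g. -DCONV_STRIDE_X=1, plus -DHAS_BIAS when a biases vector is supplied.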