Diffstat (limited to 'src/core/CL/cl_kernels')
-rw-r--r--   src/core/CL/cl_kernels/activation_quant_helpers.h               2
-rw-r--r--   src/core/CL/cl_kernels/depthwise_convolution.cl                187
-rw-r--r--   src/core/CL/cl_kernels/depthwise_convolution_quantized.cl      165
-rw-r--r--   src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl   78
-rw-r--r--   src/core/CL/cl_kernels/helpers.h                                22
-rw-r--r--   src/core/CL/cl_kernels/helpers_asymm.h                           2
6 files changed, 189 insertions(+), 267 deletions(-)
diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h
index 402e7ac41f..0e4eb2b32e 100644
--- a/src/core/CL/cl_kernels/activation_quant_helpers.h
+++ b/src/core/CL/cl_kernels/activation_quant_helpers.h
@@ -41,7 +41,7 @@ inline TYPE relu_op(TYPE x)
// Bounded RELU Activation
inline TYPE brelu_op(TYPE x)
{
- return min((TYPE)A_VAL, max(CONST_0, x));
+ return min((TYPE)A_VAL, max((TYPE)CONST_0, x));
}
// Lower Upper Bounded RELU Activation
inline TYPE lu_brelu_op(TYPE x)
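
The added cast matters because CONST_0 is defined as a bare integer constant: when TYPE is a vector type such as uchar16, max(CONST_0, x) mixes a scalar int with a vector and may fail overload resolution or pick an unintended overload, whereas (TYPE)CONST_0 broadcasts the constant to the vector type. A minimal sketch of what the fixed helper expands to, assuming hypothetical build options -DTYPE=uchar16 -DCONST_0=0 -DA_VAL=255:

    // Expansion of brelu_op with TYPE=uchar16, CONST_0=0, A_VAL=255 (illustrative values)
    inline uchar16 brelu_op(uchar16 x)
    {
        // (uchar16)0 broadcasts the zero constant so both max() operands are uchar16
        return min((uchar16)255, max((uchar16)0, x));
    }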
diff --git a/src/core/CL/cl_kernels/depthwise_convolution.cl b/src/core/CL/cl_kernels/depthwise_convolution.cl
index 1b2f5cccaa..3a227282ff 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution.cl
@@ -782,173 +782,6 @@ __kernel void depthwise_convolution_reshape_weights(
}
#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DST_WIDTH)
-#if defined(NCHW)
-#define in_stride_x src_stride_x
-#define in_stride_y src_stride_y
-#define in_stride_z src_stride_z
-#define out_stride_x dst_stride_x
-#define out_stride_y dst_stride_y
-#define out_stride_z dst_stride_z
-#else //defined(NCHW)
-#define in_stride_x src_stride_y
-#define in_stride_y src_stride_z
-#define in_stride_z src_stride_x
-#define out_stride_x dst_stride_y
-#define out_stride_y dst_stride_z
-#define out_stride_z dst_stride_x
-#endif //defined(NCHW)
-
-#if defined(SRC_WIDTH) && defined(DATA_TYPE)
-/** This kernel reshapes each of the tensor's low three dimensions to single rows.
- *
- * @note Datatype and source width should be given as a preprocessor argument using -DDATA_TYPE=type and -DSRC_WIDTH=width. e.g. -DSRC_WIDTH=128
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: F16/F32
- * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- */
-__kernel void depthwise_convolution_reshape_weights_generic(
- TENSOR3D_DECLARATION(src),
- IMAGE_DECLARATION(dst)
-#ifdef HAS_BIAS
- ,
- VECTOR_DECLARATION(biases)
-#endif /* HAS_BIAS */
-)
-{
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-#endif /* HAS_BIAS */
-
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + get_global_id(1) * in_stride_y + get_global_id(2) * in_stride_z;
- __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + get_global_id(1) * SRC_WIDTH * dst_stride_x + get_global_id(2) * dst_stride_y;
-
- for(int i = 0; i < SRC_WIDTH; ++i, input_ptr += in_stride_x)
- {
- *((__global DATA_TYPE *)(output_ptr + i * dst_stride_x)) = *((__global DATA_TYPE *)input_ptr);
- }
-
-#if defined(HAS_BIAS)
- if(get_global_id(1) == 0)
- {
- *((__global DATA_TYPE *)(output_ptr + SRC_WIDTH * get_global_size(1) * dst_stride_x)) = *((__global DATA_TYPE *)(biases.ptr + get_global_id(2) * biases_stride_x));
- }
-#endif // defined(HAS_BIAS)
-}
-#endif //defined(SRC_WIDTH) && defined(DATA_TYPE)
-
-#if defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(DATA_TYPE) && defined(PAD_VALUE) && defined(DEPTH_MULTIPLIER) && defined(DILATION_X) && defined(DILATION_Y)
-/** This kernel performs a reshaping of the input tensor to a tensor used to perform depthwise convolution using vector to matrix multiplication.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The convolution information must be passed at compile time using -DSTRIDE_X, -DSTRIDE_Y, -DPAD_LEFT, -DPAD_TOP, -DPAD_RIGHT, -DPAD_BOTTOM, -DKERNEL_WIDHT, -DKERNEL_HEIGHT, -DSRC_WIDTH, -DSRC_HEIGHT, -DDEPTH_MULTIPLIER
- * @note The dilation_x and dilation_y must be passed at compile time using -DDILATION_X and -DDILATION_Y: e.g. -DDILATION_X=1, -DDILATION_Y=1
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void depthwise_im2col(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst))
-{
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- const int src_pixel_linear = get_global_id(1) * STRIDE_X;
- const int full_length = SRC_WIDTH + PAD_LEFT + PAD_RIGHT;
- const int max_initial_x = STRIDE_X * (((full_length - (KERNEL_WIDTH + (KERNEL_WIDTH - 1) * (DILATION_X - 1))) / STRIDE_X) + 1);
-
- const int src_x = -PAD_LEFT + src_pixel_linear % max_initial_x;
- const int src_y = -PAD_TOP + src_pixel_linear / max_initial_x * STRIDE_Y;
- const int src_z = get_global_id(2) / DEPTH_MULTIPLIER;
-
- __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + src_z * in_stride_z;
- __global DATA_TYPE *output_ptr = ((__global DATA_TYPE *)(dst.ptr));
-
- for(int y = src_y; y < src_y + KERNEL_HEIGHT + (KERNEL_HEIGHT - 1) * (DILATION_Y - 1); y += DILATION_Y)
- {
- for(int x = src_x; x < src_x + KERNEL_WIDTH + (KERNEL_WIDTH - 1) * (DILATION_X - 1); x += DILATION_X, ++output_ptr)
- {
- if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT)
- {
- *output_ptr = PAD_VALUE;
- }
- else
- {
- *output_ptr = *((__global DATA_TYPE *)(input_ptr + x * in_stride_x + y * in_stride_y));
- }
- }
- }
-#if defined(HAS_BIAS)
- *output_ptr = (DATA_TYPE)(1);
-#endif // defined(HAS_BIAS)
-}
-
-#endif //defined(STRIDE_X) && defined(STRIDE_Y) && defined(PAD_LEFT) && defined(PAD_TOP) && defined(PAD_RIGHT) && defined(PAD_BOTTOM) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(SRC_WIDTH) && defined(DATA_TYPE) && defined(PAD_VALUE) && defined(DEPTH_MULTIPLIER)
-
-#if defined(CONV_WIDTH) && defined(CONV_HEIGHT) && defined(DATA_TYPE)
-
-/** This kernel performs a reshaping of the output of the depthwise generic convolution.
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The convolution information must be passed at compile time using -DCONV_WIDTH, -DCONV_HEIGHT, e.g -DCONV_WIDTH=32, -DCONV_HEIGHT=42
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void depthwise_vector_to_tensor(
- VECTOR_DECLARATION(src),
- TENSOR3D_DECLARATION(dst))
-{
- Vector src = CONVERT_TO_VECTOR_STRUCT(src);
-
- const int patch_size = CONV_WIDTH * CONV_HEIGHT;
- const int id0 = get_global_id(0);
- const int z = id0 / patch_size;
- const int index2D = id0 - z * patch_size;
-
- __global uchar *out_ptr = dst_ptr + dst_offset_first_element_in_bytes + index2D % CONV_WIDTH * out_stride_x + index2D / CONV_WIDTH * out_stride_y + z * out_stride_z;
- *((__global DATA_TYPE *)out_ptr) = *((__global DATA_TYPE *)src.ptr);
-}
-
-#endif //defined(CONV_WIDTH) && defined(CONV_HEIGHT) && defined(DATA_TYPE)
-
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) && defined(IS_F16)
#if defined(CONV_STRIDE_X)
#if CONV_STRIDE_X == 1
@@ -1478,7 +1311,7 @@ __kernel void depthwise_convolution_3x3_stridex2_stridey2_bifrost_f16(
//3x3 Convolution of elements starting in 0th row
pixels0 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 0, weights_addr, weights_stride_y);
//3x3 Convolution of elements starting in 2nd row
- pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y);
+ pixels1 = convolution_3x3_dilation_stridex2_stridey2_bifrost_f16(src_addr, src.stride_x, src.stride_y, 2, weights_addr, weights_stride_y);
#endif /* DILATION_X==1 && DILATION_Y==1 */
#ifdef HAS_BIAS
@@ -1556,23 +1389,17 @@ __kernel void dwc_MxN_native_fp_nhwc(
int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
int b = get_global_id(2) / (int)DST_DEPTH; // batch
#else // defined(DST_DEPTH)
- int z = get_global_id(2); // spatial coordinate y
+ int z = get_global_id(2); // spatial coordinate y
#endif // defined(DST_DEPTH)
- __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes +
- x * sizeof(DATA_TYPE) * (int)N0;
+ __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)N0;
- __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes +
- x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0 +
- y * dst_stride_y +
- z * dst_stride_z;
+ __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0 + y * dst_stride_y + z * dst_stride_z;
- __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes +
- x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0;
+ __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0;
#if defined(HAS_BIAS)
- __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes +
- x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0;
+ __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER * (int)N0;
#endif // defined(HAS_BIAS)
#if defined(DST_DEPTH)
@@ -1611,7 +1438,7 @@ __kernel void dwc_MxN_native_fp_nhwc(
#if GPU_ARCH == GPU_ARCH_MIDGARD
res += i * w;
#else // GPU_ARCH == GPU_ARCH_MIDGARD
- res = fma(i, w, res);
+ res = fma(i, w, res);
#endif // GPU_ARCH == GPU_ARCH_MIDGARD
}
x_coord_tmp += DILATION_X;
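
In dwc_MxN_native_fp_nhwc the global IDs map to NHWC coordinates: get_global_id(0) indexes a block of N0 channels, get_global_id(1) the output column (the variable y, commented as "spatial coordinate x"), and get_global_id(2) the output row (z), optionally folded together with the batch when DST_DEPTH is defined. The merged single-line address computations above all follow the same pattern, sketched here as an illustrative helper that is not part of the kernel:

    // Illustrative only: destination offset for one work-item of the NHWC kernel,
    // mirroring the merged d_addr expression in the hunk above.
    inline size_t dwc_nhwc_dst_offset(int x, int y, int z, int b,
                                      size_t elem_size, int depth_multiplier, int n0,
                                      size_t stride_y, size_t stride_z, size_t stride_w)
    {
        return (size_t)x * elem_size * depth_multiplier * n0 // channel block along X
               + (size_t)y * stride_y                        // output column (spatial x)
               + (size_t)z * stride_z                        // output row (spatial y)
               + (size_t)b * stride_w;                       // batch (only when DST_DEPTH is defined)
    }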
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
index 8f2e441693..10872d460a 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -24,19 +24,30 @@
#include "helpers_asymm.h"
-#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
-
-#if defined(ACTIVATION_TYPE) && defined(CONST_0)
-#define DATA_TYPE uchar
#ifndef VEC_SIZE
+#if defined(N0)
+#define VEC_SIZE N0
+#else /* defined(N0) */
#define VEC_SIZE 8
+#endif /* defined(N0) */
#endif /* VEC_SIZE */
+
+#if defined(ACTIVATION_TYPE) && defined(CONST_0)
+#define DATA_TYPE uchar
#include "activation_layer_quant.cl"
#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x)
#else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
#define ACTIVATION_FUNC(x) (x)
#endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
+#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
+#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
+#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE)
+#define VEC_USHORT VEC_DATA_TYPE(ushort, VEC_SIZE)
+#define VEC_SHORT VEC_DATA_TYPE(short, VEC_SIZE)
+
+#if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
+
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), val);
@@ -635,11 +646,6 @@ __kernel void dwc_3x3_native_qasymm8_dot8_nchw(
#define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE)
-#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
-#define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
-#define VEC_UCHAR VEC_DATA_TYPE(uchar, VEC_SIZE)
-#define VEC_USHORT VEC_DATA_TYPE(ushort, VEC_SIZE)
-
#define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_USHORT) * CONVERT(y, VEC_USHORT), VEC_INT)
#if WEIGHTS_OFFSET != 0
@@ -1375,3 +1381,144 @@ __kernel void dwc_3x3_reshaped_qasymm8_dot8_stride1_nhwc(
#endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT)
#endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER))
+
+#if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
+/** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped
+ *
+ * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1)
+ * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112)
+ * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80)
+ * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5)
+ * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5)
+ * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1)
+ * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1)
+ * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1)
+ * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1)
+ * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: same as src_ptr
+ * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
+ * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
+ */
+__kernel void dwc_MxN_native_quantized8_nhwc(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(weights),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(biases)
+#endif // defined(HAS_BIAS)
+)
+{
+ int x = get_global_id(0); // channels
+ int y = get_global_id(1); // spatial coordinate x
+#if defined(DST_DEPTH)
+ int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y
+ int b = get_global_id(2) / (int)DST_DEPTH; // batch
+#else // defined(DST_DEPTH)
+ int z = get_global_id(2); // spatial coordinate y
+#endif // defined(DST_DEPTH)
+
+ __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(uchar) * (int)N0;
+
+ __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(uchar) * (int)DEPTH_MULTIPLIER * (int)N0 + y * dst_stride_y + z * dst_stride_z;
+
+ __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x * sizeof(uchar) * (int)DEPTH_MULTIPLIER * (int)N0;
+
+#if defined(HAS_BIAS)
+ __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int) * (int)DEPTH_MULTIPLIER * (int)N0;
+#endif // defined(HAS_BIAS)
+
+#if defined(DST_DEPTH)
+ s_addr += b * src_stride_w;
+ d_addr += b * dst_stride_w;
+#endif // defined(DST_DEPTH)
+
+#if DEPTH_MULTIPLIER > 1
+ for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d)
+ {
+#endif // DEPTH_MULTIPLIER > 1
+ // Each work-item computes N0x1x1 elements
+ VEC_SHORT res = 0;
+
+ int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT;
+ int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP;
+
+ for(int yk = 0; yk < KERNEL_HEIGHT; ++yk)
+ {
+ if(y_coord >= 0 && y_coord < SRC_DIM2)
+ {
+ int x_coord_tmp = x_coord;
+
+ for(int xk = 0; xk < KERNEL_WIDTH; ++xk)
+ {
+ if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1)
+ {
+ int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z;
+ int w_offset = xk * weights_stride_y + yk * weights_stride_z;
+
+ // Load input and weights values
+ VEC_SHORT i = CONVERT(VLOAD(N0)(0, (__global uchar *)(s_addr + s_offset)), VEC_SHORT);
+ VEC_SHORT w = CONVERT(VLOAD(N0)(0, (__global uchar *)(w_addr + w_offset)), VEC_SHORT);
+
+ res += (i + (VEC_SHORT)INPUT_OFFSET) * (w + (VEC_SHORT)WEIGHTS_OFFSET);
+ }
+ x_coord_tmp += DILATION_X;
+ }
+ }
+ y_coord += DILATION_Y;
+ }
+
+#if defined(HAS_BIAS)
+ VEC_SHORT bias = CONVERT(VLOAD(N0)(0, (__global int *)(b_addr)), VEC_SHORT);
+ res += bias;
+#endif // defined(HAS_BIAS)
+
+ res = CONVERT(ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(CONVERT(res, VEC_INT), OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0), VEC_SHORT);
+ res += (VEC_SHORT)OUTPUT_OFFSET;
+
+ VEC_UCHAR res1 = CONVERT_SAT(res, VEC_UCHAR);
+
+ VSTORE(N0)
+ (ACTIVATION_FUNC(res1), 0, (__global uchar *)(d_addr));
+
+#if DEPTH_MULTIPLIER > 1
+ w_addr += sizeof(uchar);
+ d_addr += sizeof(uchar);
+#if defined(HAS_BIAS)
+ b_addr += sizeof(int);
+#endif // defined(HAS_BIAS)
+ }
+#endif // DEPTH_MULTIPLIER > 1
+}
+#endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
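
The new kernel is configured entirely through the -D preprocessor options listed in its @note block. For reference, a build-options string for it could look like the following; every value here is invented for illustration, the real ones are produced by the host-side configuration from the tensor shapes and quantization parameters:

    -DSRC_DIM1=112 -DSRC_DIM2=112 -DKERNEL_WIDTH=3 -DKERNEL_HEIGHT=3
    -DN0=4 -DDEPTH_MULTIPLIER=1 -DDILATION_X=1 -DDILATION_Y=1
    -DCONV_STRIDE_X=1 -DCONV_STRIDE_Y=1 -DCONV_PAD_LEFT=1 -DCONV_PAD_TOP=1
    -DINPUT_OFFSET=-128 -DWEIGHTS_OFFSET=-128 -DOUTPUT_OFFSET=128
    -DOUTPUT_MULTIPLIER=1395864371 -DOUTPUT_SHIFT=5 -DHAS_BIAS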
diff --git a/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl b/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl
index 83da76785b..5ad9afb23c 100644
--- a/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl
+++ b/src/core/CL/cl_kernels/direct_convolution_1x1_3x3_5x5_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -247,79 +247,3 @@ __kernel void direct_convolution_1x1_3x3_5x5_quantized(
vstore8(convert_uchar8_sat(pixels0), 0, (__global uchar *)dst.ptr);
}
#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-
-#if defined(VEC_SIZE)
-
-#define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE)
-#define CONVERT_SAT_UCHAR_STR(x, size) (convert_uchar##size##_sat((x)))
-#define CONVERT_SAT_UCHAR(x, size) CONVERT_SAT_UCHAR_STR(x, size)
-
-/** This function computes the output stage of a depthwise convolution.
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: QASYMM8
- * @param[in] src_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: QASYMM8
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Y processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] bias_ptr (Optional) Pointer to the biases vector. Supported data types: S32
- * @param[in] bias_stride_x (Optional) Stride of the biases vector in X dimension (in bytes)
- * @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector
- * @param[in] output_offset Quantized offset of zero point of the output tensor data range
- * @param[in] output_multiplier Output scale multiplier
- * @param[in] output_shift Output scale divisor exponent
- */
-__kernel void output_stage_quantized(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
-#if defined(HAS_BIAS)
- VECTOR_DECLARATION(bias),
-#endif //defined(HAS_BIAS)
- int output_offset,
- int output_multiplier,
- int output_shift)
-{
- Image src = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src);
- Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst);
-#if defined(HAS_BIAS)
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-#endif //defined(HAS_BIAS)
-
- // Load input
- VEC_INT vals = VLOAD(VEC_SIZE)(0, (__global int *)(src.ptr));
-
-#if defined(HAS_BIAS)
- // Load and add bias
-#if defined(NCHW)
- int bias_value = *((__global int *)(vector_offset(&bias, get_global_id(2))));
-#else // defined(NCHW)
- VEC_INT bias_value = VLOAD(VEC_SIZE)(0, ((__global int *)(vector_offset(&bias, get_global_id(0) * VEC_SIZE))));
-#endif // defined(NCHW)
-
- vals += (VEC_INT)(bias_value);
-#endif //defined(HAS_BIAS)
-
- vals = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(vals, output_multiplier, output_shift, VEC_SIZE);
- vals = vals + output_offset;
-
- // Store result in dst
- VSTORE(VEC_SIZE)
- (CONVERT_SAT_UCHAR(vals, VEC_SIZE), 0, (__global uchar *)dst.ptr);
-}
-
-#undef VEC_INT
-#undef CONVERT_SAT_UCHAR_STR
-#undef CONVERT_SAT_UCHAR
-
-#endif // defined(VEC_SIZE)
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index f7f208529a..8117c1e519 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -150,6 +150,28 @@
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
+#define convert_float1 convert_float
+#define convert_half1 convert_half
+#define convert_char1 convert_char
+#define convert_uchar1 convert_uchar
+#define convert_short1 convert_short
+#define convert_ushort1 convert_ushort
+#define convert_int1 convert_int
+#define convert_uint1 convert_uint
+#define convert_long1 convert_long
+#define convert_ulong1 convert_ulong
+#define convert_double1 convert_double
+
+#define convert_char1_sat convert_char_sat
+#define convert_uchar1_sat convert_uchar_sat
+#define convert_short1_sat convert_short_sat
+#define convert_ushort1_sat convert_ushort_sat
+#define convert_int1_sat convert_int_sat
+#define convert_uint1_sat convert_uint_sat
+#define convert_long1_sat convert_long_sat
+#define convert_ulong1_sat convert_ulong_sat
+#define convert_double1_sat convert_double_sat
+
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
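
The convert_<type>1 aliases exist because size-generic kernels build their conversion calls by token pasting: with a vector size of 1 the pasted name (e.g. convert_uchar1_sat) is not an OpenCL built-in, so the aliases redirect it to the scalar built-in. A minimal sketch of the mechanism, assuming the library's CONVERT_SAT macro pastes names along the lines shown:

    // Assumed shape of the generic conversion macro (hypothetical reconstruction):
    //   #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
    //   #define CONVERT_SAT(x, type)     CONVERT_SAT_STR(x, type)
    //
    // With N0 == 1:
    //   CONVERT_SAT(res, VEC_DATA_TYPE(uchar, N0))  ->  convert_uchar1_sat(res)
    // which only compiles because of the new alias
    //   #define convert_uchar1_sat convert_uchar_sat
    // so the same kernel body works unchanged for N0 = 1, 2, 3, 4, 8 and 16.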
diff --git a/src/core/CL/cl_kernels/helpers_asymm.h b/src/core/CL/cl_kernels/helpers_asymm.h
index 53e6719cd7..57ecccc2b2 100644
--- a/src/core/CL/cl_kernels/helpers_asymm.h
+++ b/src/core/CL/cl_kernels/helpers_asymm.h
@@ -381,11 +381,13 @@ DEQUANTIZE_IMPL(uchar, 4)
DEQUANTIZE_IMPL(ushort, 4)
DEQUANTIZE_IMPL(short, 4)
+ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(1)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8)
ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16)
+ASYMM_MULT_IMPL(1)
ASYMM_MULT_IMPL(2)
ASYMM_MULT_IMPL(4)
ASYMM_MULT_IMPL(8)
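
The new ..._IMPL(1) instantiations provide vector-size-1 variants of the fixed-point helpers, which the N0=1 path of dwc_MxN_native_quantized8_nhwc needs when it calls ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(..., N0). A scalar floating-point reference of the output stage that macro implements, as a sketch only (the kernel itself stays in integer fixed-point arithmetic):

    #include <math.h>

    // Illustrative reference: requantize one int32 accumulator to QASYMM8.
    // OUTPUT_MULTIPLIER is assumed to be a Q0.31 encoding of a real scale in [0.5, 1),
    // applied together with a right shift of OUTPUT_SHIFT bits.
    static unsigned char requantize_ref(int acc, int output_multiplier,
                                        int output_shift, int output_offset)
    {
        double scale = (double)output_multiplier / (double)(1ll << 31) / (double)(1 << output_shift);
        long   v     = lrint((double)acc * scale) + output_offset;
        if(v < 0)   v = 0;     // saturate to the QASYMM8 range
        if(v > 255) v = 255;
        return (unsigned char)v;
    }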