path: root/src/core/CL/cl_kernels/direct_convolution3x3.cl
Diffstat (limited to 'src/core/CL/cl_kernels/direct_convolution3x3.cl')
-rw-r--r-- src/core/CL/cl_kernels/direct_convolution3x3.cl | 155
1 file changed, 151 insertions(+), 4 deletions(-)
diff --git a/src/core/CL/cl_kernels/direct_convolution3x3.cl b/src/core/CL/cl_kernels/direct_convolution3x3.cl
index 28da544f89..26f24e187b 100644
--- a/src/core/CL/cl_kernels/direct_convolution3x3.cl
+++ b/src/core/CL/cl_kernels/direct_convolution3x3.cl
@@ -40,6 +40,8 @@ MULQ_SAT_IMPL(qs32x8, qs32x8)
#endif /* FIXED_POINT_POSITION */
+#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+
#if STRIDE_X == 1
#define CONVOLUTION1x3(acc, src_row_ptr, weights_row_ptr) CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr)
#elif STRIDE_X == 2 /* STRIDE_X == 1 */
@@ -77,11 +79,12 @@ MULQ_SAT_IMPL(qs32x8, qs32x8)
/** This kernel performs a direct convolution to convolve the low three dimensions.
*
+ * @note This OpenCL kernel works with stride_x = 1 and 2
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -111,7 +114,6 @@ MULQ_SAT_IMPL(qs32x8, qs32x8)
* @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
* @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
*/
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
__kernel void direct_convolution3x3(
TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst),
@@ -152,4 +154,149 @@ __kernel void direct_convolution3x3(
vstore8(CONVERT_SAT(pixels0, VEC_DATA_TYPE(DATA_TYPE, 8)), 0, (__global DATA_TYPE *)dst.ptr);
}
-#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+#endif //defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
+
+#if defined(WEIGHTS_DEPTH)
+
+#define CONVOLUTION1x3_BIFROST(acc, src0, src1, weights_row0) \
+ ({ \
+ acc.s0 = mad(src0.s0, weights_row0.s0, acc.s0); \
+ acc.s1 = mad(src0.s1, weights_row0.s0, acc.s1); \
+ acc.s2 = mad(src0.s2, weights_row0.s0, acc.s2); \
+ acc.s3 = mad(src0.s3, weights_row0.s0, acc.s3); \
+ acc.s0 = mad(src0.s1, weights_row0.s1, acc.s0); \
+ acc.s1 = mad(src0.s2, weights_row0.s1, acc.s1); \
+ acc.s2 = mad(src0.s3, weights_row0.s1, acc.s2); \
+ acc.s3 = mad(src1.s0, weights_row0.s1, acc.s3); \
+ acc.s0 = mad(src0.s2, weights_row0.s2, acc.s0); \
+ acc.s1 = mad(src0.s3, weights_row0.s2, acc.s1); \
+ acc.s2 = mad(src1.s0, weights_row0.s2, acc.s2); \
+ acc.s3 = mad(src1.s1, weights_row0.s2, acc.s3); \
+ })
+
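+// For reference, each CONVOLUTION1x3_BIFROST(acc, src0, src1, w) invocation is
+// equivalent to the scalar loop sketched below (illustrative only, not part of
+// the kernel): src0 holds input elements 0..3, src1 holds elements 4..5, and
+// the macro simply unrolls the loop into 12 explicit mad() calls so the Bifrost
+// compiler can schedule them as fused multiply-accumulates.
+//
+//     for(int i = 0; i < 4; ++i) // 4 output pixels per row
+//     {
+//         for(int k = 0; k < 3; ++k) // 3 taps of the 1x3 filter row
+//         {
+//             float s = (i + k < 4) ? src0[i + k] : src1[i + k - 4];
+//             acc[i] = mad(s, w[k], acc[i]);
+//         }
+//     }
+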
+/** An optimized direct convolution 3x3 OpenCL kernel for Bifrost architectures when the data type is F32
+ *
+ * @note This OpenCL kernel works only with stride_x and stride_y equal to 1
+ * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
+ * @note If biases are used then -DHAS_BIAS has to be passed at compile time
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
+ * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
+ * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
+ * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
+ * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
+ */
+__kernel void direct_convolution3x3_f32_bifrost(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(weights),
+#ifdef HAS_BIAS
+ VECTOR_DECLARATION(biases),
+#endif /* defined(HAS_BIAS) */
+ unsigned int weights_stride_w)
+{
+ // Get the kernel index
+ const int kernel_index = get_global_id(2);
+
+ Image src = CONVERT_TO_IMAGE_STRUCT(src);
+ Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
+
+ float4 pixels0 = 0;
+ float4 pixels1 = 0;
+ float4 pixels2 = 0;
+
+ __global uchar *weights_addr = (__global uchar *)(weights_ptr + weights_offset_first_element_in_bytes + kernel_index * weights_stride_w);
+ __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0);
+
+ // Note: Since each work-item computes a 4 (wide) x 3 (high) block of output elements, a 3x3 filter with stride 1 requires loading 3 + 2 = 5 rows from the input tensor
+
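+ // Row-reuse schedule (3x3 filter, stride 1): input row r contributes to
+ // output rows max(0, r - 2) .. min(2, r), so the five loaded rows are
+ // consumed as follows:
+ //   row0 -> pixels0
+ //   row1 -> pixels0, pixels1
+ //   row2 -> pixels0, pixels1, pixels2
+ //   row3 -> pixels1, pixels2
+ //   row4 -> pixels2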
+ for(ushort d = 0; d < (ushort)WEIGHTS_DEPTH; ++d)
+ {
+ // Load the weights
+ float3 weights_row0 = vload3(0, (__global float *)(weights_addr + 0 * weights_stride_y));
+ float3 weights_row1 = vload3(0, (__global float *)(weights_addr + 1 * weights_stride_y));
+ float3 weights_row2 = vload3(0, (__global float *)(weights_addr + 2 * weights_stride_y));
+ float4 src0;
+ float2 src1;
+
+ // Load values from row0 of input tensor
+ src0 = vload4(0, (__global float *)(src_addr + 0 * src_stride_y));
+ src1 = vload2(0, (__global float *)(src_addr + 0 * src_stride_y) + 4);
+
+ CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row0);
+
+ // Load values from row1 of input tensor
+ src0 = vload4(0, (__global float *)(src_addr + 1 * src_stride_y));
+ src1 = vload2(0, (__global float *)(src_addr + 1 * src_stride_y) + 4);
+
+ // Accumulate
+ CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row1);
+ CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row0);
+
+ // Load values from row2 of input tensor
+ src0 = vload4(0, (__global float *)(src_addr + 2 * src_stride_y));
+ src1 = vload2(0, (__global float *)(src_addr + 2 * src_stride_y) + 4);
+
+ // Accumulate
+ CONVOLUTION1x3_BIFROST(pixels0, src0, src1, weights_row2);
+ CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row1);
+ CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row0);
+
+ // Load values from row3 of input tensor
+ src0 = vload4(0, (__global float *)(src_addr + 3 * src_stride_y));
+ src1 = vload2(0, (__global float *)(src_addr + 3 * src_stride_y) + 4);
+
+ // Accumulate
+ CONVOLUTION1x3_BIFROST(pixels1, src0, src1, weights_row2);
+ CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row1);
+
+ // Load values from row4 of input tensor
+ src0 = vload4(0, (__global float *)(src_addr + 4 * src_stride_y));
+ src1 = vload2(0, (__global float *)(src_addr + 4 * src_stride_y) + 4);
+
+ // Accumulate
+ CONVOLUTION1x3_BIFROST(pixels2, src0, src1, weights_row2);
+
+ src_addr += src_stride_z;
+ weights_addr += weights_stride_z;
+ }
+
+#ifdef HAS_BIAS
+ Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
+
+ float4 bias = (float4) * ((__global float *)(vector_offset(&biases, kernel_index)));
+
+ pixels0 += bias;
+ pixels1 += bias;
+ pixels2 += bias;
+#endif /* defined(HAS_BIAS) */
+
+ vstore4(pixels0, 0, (__global float *)(dst.ptr + 0 * dst_stride_y));
+ vstore4(pixels1, 0, (__global float *)(dst.ptr + 1 * dst_stride_y));
+ vstore4(pixels2, 0, (__global float *)(dst.ptr + 2 * dst_stride_y));
+}
+#endif // defined(WEIGHTS_DEPTH)
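
For context, the kernels in this file only compile when the documented options are passed as OpenCL build options. Below is a minimal host-side sketch (not part of this patch) of how they might be supplied when building the file directly with the raw OpenCL API; the Compute Library normally drives this through its own kernel-library plumbing, the helper name is hypothetical, and WEIGHTS_DEPTH=64 is an arbitrary illustrative value.

#include <CL/cl.h>
#include <stdio.h>

/* Hypothetical helper: builds direct_convolution3x3.cl with the compile-time
 * defines documented above and returns the Bifrost-optimized F32 kernel.
 * Context/device creation and most error handling are elided. */
static cl_kernel build_direct_conv3x3_f32(cl_context context, cl_device_id device,
                                          const char *source, size_t source_len)
{
    cl_int     err     = CL_SUCCESS;
    cl_program program = clCreateProgramWithSource(context, 1, &source, &source_len, &err);

    /* -DWEIGHTS_DEPTH enables the Bifrost kernel; -DDATA_TYPE and -DSTRIDE_X are
     * additionally required by the generic direct_convolution3x3 kernel.
     * Pass -DHAS_BIAS only when a biases vector will be provided. */
    const char *build_opts = "-DDATA_TYPE=float -DSTRIDE_X=1 -DWEIGHTS_DEPTH=64 -DHAS_BIAS";

    err = clBuildProgram(program, 1, &device, build_opts, NULL, NULL);
    if(err != CL_SUCCESS)
    {
        char log[4096];
        clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, sizeof(log), log, NULL);
        fprintf(stderr, "build failed:\n%s\n", log);
        return NULL;
    }
    return clCreateKernel(program, "direct_convolution3x3_f32_bifrost", &err);
}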