From 87781081cea0443218721e5d79bbb2f07065a2ca Mon Sep 17 00:00:00 2001
From: Aleksandr Nikolaev
Date: Thu, 8 Apr 2021 10:06:34 +0100
Subject: Rewritten Winograd (4x4, 5x5) output transformation

This patch takes advantage of tile_helpers.h and of a different data
layout for the input and tmp matrices.

Resolves: COMPMID-4142
Signed-off-by: Aleksandr Nikolaev
Change-Id: I5d10bd3f08137414ee7520eef1e6d0aef8cbf160
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5382
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gian Marco Iodice
Reviewed-by: Michele Di Giorgio
---
 .../CL/cl_kernels/winograd_output_transform.cl | 330 ++++++++------------
 1 file changed, 127 insertions(+), 203 deletions(-)

(limited to 'src/core/CL/cl_kernels/winograd_output_transform.cl')

diff --git a/src/core/CL/cl_kernels/winograd_output_transform.cl b/src/core/CL/cl_kernels/winograd_output_transform.cl
index 1b39f62d97..a56ce3de6f 100644
--- a/src/core/CL/cl_kernels/winograd_output_transform.cl
+++ b/src/core/CL/cl_kernels/winograd_output_transform.cl
@@ -1076,11 +1076,13 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
  * @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
  * @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=4
  * @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=4
+ * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT: e.g. -DSRC_HEIGHT=32
  * @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
  * @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
  * @note If this kernel is used to perform Winograd output transform 5x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
  * @note If this kernel is used to perform Winograd output transform 1x5, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
  * @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ * @note The number of output elements processed along the X direction must be passed at compile time using -DN0 e.g. -DN0=1
  *
 * @param[in]  src_ptr                           Pointer to the source tensor. Supported data types: F32/F16
 * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
@@ -1103,243 +1105,165 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
  * @param[in]  dst_step_w                        dst_stride_w * number of elements along W processed per workitem(in bytes)
  * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
  */
-__kernel void winograd_output_transform_4x4_5x5_nhwc(
-    TENSOR4D_DECLARATION(src),
-    TENSOR4D_DECLARATION(dst),
+    __kernel void winograd_output_transform_4x4_5x5_nhwc(
+        TENSOR4D(src, BUFFER),
+        TENSOR4D(dst, BUFFER),
 #if defined(HAS_BIAS)
     VECTOR_DECLARATION(bias),
 #endif // defined(HAS_BIAS)
     int dst_size)
 {
-    // Each thread stores a 4x4/4x1 or 1x4 tile
-#if defined(SRC_DEPTH)
-    Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
-    const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
-    Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
-    const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
-
-    int y_in  = get_global_id(1);
-    int x_out = get_global_id(0);
-    int y_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
-    int z_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
-#if defined(SRC_DEPTH)
-    int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
-
-    __global unsigned char *dst_base_ptr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE);
-
-#if defined(SRC_DEPTH)
-    dst_base_ptr += batch * dst_stride_w;
-#endif // defined(SRC_DEPTH)
-
-    // Load the values across the channels to compose the input tile
-    DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
-    DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
-    DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
-    DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
-    DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
-    DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
-    DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
-    DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
+    const int cout = GET_SPATIAL_IDX(0, N0, 0); // OFM
+    const int mout = GET_SPATIAL_IDX(1, 1, 0);  // WINOGRAD OUTPUT TILES
+    const int bout = GET_SPATIAL_IDX(2, 1, 0);  // BATCH SIZE IDX

 #if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-    // Compute out00, out01, out02 and out03
-    float out00 = d00 + d01 + d02 + d03 + d04 + 8.0f * d05 + 8.0f * d06;
-    float out01 = d01 - d02 + 2.0f * d03 - 2.0f * d04 + 4.0f * d05 - 4.0f * d06;
-    float out02 = d01 + d02 + 4.0f * d03 + 4.0f * d04 + 2.0f * d05 + 2.0f * d06;
-    float out03 = d01 - d02 + 8.0f * d03 - 8.0f * d04 + d05 - d06 + d07;
+    TILE(DATA_TYPE, 8, N0, in) = { { 0 } };
+    TILE(DATA_TYPE, 4, N0, out) = { { 0 } };
+    TILE(DATA_TYPE, 4, N0, tmp) = { { 0 } };
+    TILE(uint, 8, 1, src_indirect_y) = { { 0 } };

-#if defined(HAS_BIAS)
-    // Add bias
-    Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+    LOOP_UNROLLING(int, i, 0, 8, 1)
+    {
+        src_indirect_y[i].v = mout + i * SRC_HEIGHT;
+        src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 8);
+    }

-    float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
+    // "in" contains 1x8 or 8x1 tile here
+    T_LOAD_INDIRECT(DATA_TYPE, 8, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);

-    out00 += (float)b;
-    out01 += (float)b;
-    out02 += (float)b;
-    out03 += (float)b;
-#endif // defined(HAS_BIAS)
+    // A^T * in, and in this degenerate case out consists of 1 column/row
+    tmp[0].v = in[1].v - in[2].v;
+    tmp[1].v = 2.0f * (in[3].v - in[4].v);
+    tmp[2].v = 2.0f * (in[5].v + in[6].v);
+    tmp[3].v = in[3].v + in[4].v;
+    out[0].v = in[0].v + in[1].v + in[2].v + tmp[3].v + 4.0f * tmp[2].v;
+    out[1].v = tmp[0].v + tmp[1].v + 4.0f * (in[5].v - in[6].v);
+    out[2].v = in[1].v + in[2].v + 4.0f * tmp[3].v + tmp[2].v;
+    out[3].v = tmp[0].v + 4.0f * tmp[1].v + in[5].v - in[6].v + in[7].v;

-    // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-    dst_base_ptr += y_out * dst_stride_y;
-
-    int4 offset_z = min((int4)z_out + (int4)(0, 1, 2, 3), (int4)((int)DST_HEIGHT - 1)) * (int4)dst_stride_z;
+#if defined(HAS_BIAS)
+    TILE(DATA_TYPE, 1, N0, b);

-    VEC_DATA_TYPE(DATA_TYPE, 4)
-    out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
-                         B_VAL);
+    T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 0, b);

-    // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
-    // is overwritten with the valid one
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s3)) = out0_dt.s3;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s2)) = out0_dt.s2;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s1)) = out0_dt.s1;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_z.s0)) = out0_dt.s0;
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+    // c = c + bias[broadcasted]
+    T_ADD_BROADCAST_X(DATA_TYPE, 4, N0, out, b, out);
+#endif // HAS_BIAS

-    dst_base_ptr += z_out * dst_stride_z;
+    int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+    int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;

-    int4 offset_y = min((int4)y_out + (int4)(0, 1, 2, 3), (int4)((int)DST_WIDTH - 1)) * (int4)dst_stride_y;
+    T_ACTIVATION(DATA_TYPE, 4, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);

-    VEC_DATA_TYPE(DATA_TYPE, 4)
-    out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 4))(out00, out01, out02, out03), VEC_DATA_TYPE(DATA_TYPE, 4)), A_VAL,
-                         B_VAL);
+    TILE(uint, 4, 1, dst_indirect_y) = { { 0 } };

-    // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
-    // is overwritten with the valid one
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3)) = out0_dt.s3;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2)) = out0_dt.s2;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1)) = out0_dt.s1;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0)) = out0_dt.s0;
+    // Calculate the destination indirect Y
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+    LOOP_UNROLLING(int, yk, 0, 4, 1)
+    {
+        int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
+        dst_indirect_y[yk].v = x_out + y_c * DST_WIDTH;
+        dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+    }
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+    LOOP_UNROLLING(int, xk, 0, 4, 1)
+    {
+        int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
+        dst_indirect_y[xk].v = x_c + y_out * DST_WIDTH;
+        dst_indirect_y[xk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+    }
 #endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
-    DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
-    DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
-    DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
-    DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
-    DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
-    DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
-    DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
-    DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
-
-    DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
-    DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
-    DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
-    DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
-    DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
-    DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
-    DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
-    DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
-    DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
-    DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
-    DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
-    DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
-    DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
-    DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
-    DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
-    DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
-
-    DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
-    DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
-    DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
-    DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
-    DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
-    DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
-    DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
-    DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));
-
-    DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
-    DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
-    DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
-    DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
-    DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
-    DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
-    DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
-    DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));
+    // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 4, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);

-    DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
-    DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
-    DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
-    DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
-    DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
-    DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
-    DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
-    DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+    // Calculate the indirect Y for the source tensor
+    TILE(DATA_TYPE, 64, N0, in) = { { 0 } };
+    TILE(DATA_TYPE, 6, N0, tmp) = { { 0 } };
+    TILE(uint, 64, 1, src_indirect_y) = { { 0 } };

-    DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
-    DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
-    DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
-    DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
-    DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
-    DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
-    DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
-    DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));
+    LOOP_UNROLLING(int, i, 0, 64, 1)
+    {
+        src_indirect_y[i].v = mout + i * SRC_HEIGHT;
+        src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 64);
+    }

-    // Compute the 8x4 intermediate tensor
-    VEC_DATA_TYPE(float, 4)
-    comm_fact0, comm_fact1, comm_fact2;
-    VEC_DATA_TYPE(float, 4)
-    tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;
+    // "in" here is 8x8 tile
+    T_LOAD_INDIRECT(DATA_TYPE, 64, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);

-    COMPUTE_TMP_COL(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76, comm_fact0);
-    COMPUTE_TMP_COL(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77, comm_fact0);
+    // A^T * in
+    LOOP_UNROLLING(int, i, 0, 8, 1)
+    {
+        tmp[0].v = in[8 + i].v + in[16 + i].v;
+        tmp[1].v = in[8 + i].v - in[16 + i].v;
+        tmp[2].v = in[24 + i].v + in[32 + i].v;
+        tmp[3].v = in[24 + i].v - in[32 + i].v;
+        tmp[3].v = tmp[3].v + tmp[3].v;
+        tmp[4].v = in[40 + i].v + in[48 + i].v;
+        tmp[4].v = tmp[4].v + tmp[4].v;
+        tmp[5].v = in[40 + i].v - in[48 + i].v;
+
+        // 4x8 matrix as a result
+        in[i].v      = in[i].v + tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[4].v, tmp[2].v);
+        in[8 + i].v  = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[5].v, tmp[3].v);
+        in[16 + i].v = tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[2].v, tmp[4].v);
+        in[24 + i].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[3].v, tmp[5].v) + in[56 + i].v;
+    }

     // Compute the output tile
-    comm_fact0 = tmp_col1 + tmp_col2;
-    comm_fact1 = tmp_col3 + tmp_col4;
-    comm_fact2 = tmp_col5 + tmp_col6;
+    TILE(DATA_TYPE, 16, N0, out) = { { 0 } };

-    VEC_DATA_TYPE(float, 4)
-    out_col0 = comm_fact0 + comm_fact1 + 8.f * comm_fact2 + tmp_col0;
-    VEC_DATA_TYPE(float, 4)
-    out_col2 = comm_fact0 + 4.f * comm_fact1 + 2.f * comm_fact2;
+    // in * A, with in = A^T * in as above
+    LOOP_UNROLLING(int, i, 0, 4, 1)
+    {
+        tmp[0].v = in[8 * i + 1].v + in[8 * i + 2].v;
+        tmp[1].v = in[8 * i + 1].v - in[8 * i + 2].v;
+        tmp[2].v = in[8 * i + 3].v + in[8 * i + 4].v;
+        tmp[3].v = in[8 * i + 3].v - in[8 * i + 4].v;
+        tmp[3].v = tmp[3].v + tmp[3].v;
+        tmp[4].v = in[8 * i + 5].v + in[8 * i + 6].v;
+        tmp[4].v = tmp[4].v + tmp[4].v;
+        tmp[5].v = in[8 * i + 5].v - in[8 * i + 6].v;
+
+        // 4x4 tile
+        out[4 * i].v     = in[8 * i].v + tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[4].v, tmp[2].v);
+        out[4 * i + 1].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[5].v, tmp[3].v);
+        out[4 * i + 2].v = fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[2].v, tmp[0].v) + tmp[4].v;
+        out[4 * i + 3].v = fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[3].v, tmp[1].v) + tmp[5].v + in[8 * i + 7].v;
+    }

-    comm_fact0 = tmp_col1 - tmp_col2;
-    comm_fact1 = tmp_col3 - tmp_col4;
-    comm_fact2 = tmp_col5 - tmp_col6;
+#if defined(HAS_BIAS)
+    TILE(DATA_TYPE, 1, N0, b);

-    VEC_DATA_TYPE(float, 4)
-    out_col1 = comm_fact0 + 2.f * comm_fact1 + 4.f * comm_fact2;
-    VEC_DATA_TYPE(float, 4)
-    out_col3 = comm_fact0 + 8.f * comm_fact1 + comm_fact2 + tmp_col7;
+    T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 0, b);

-#if defined(HAS_BIAS)
-    // Add bias
-    Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+    // c = c + bias[broadcasted]
+    T_ADD_BROADCAST_X(DATA_TYPE, 16, N0, out, b, out);
+#endif // HAS_BIAS

-    DATA_TYPE b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
+    int x_out = (mout % NUM_TILES_X) * OUTPUT_TILE_W;
+    int y_out = (mout / NUM_TILES_X) * OUTPUT_TILE_H;

-    out_col0 += (VEC_DATA_TYPE(float, 4))b;
-    out_col1 += (VEC_DATA_TYPE(float, 4))b;
-    out_col2 += (VEC_DATA_TYPE(float, 4))b;
-    out_col3 += (VEC_DATA_TYPE(float, 4))b;
-#endif // defined(HAS_BIAS)
+    T_ACTIVATION(DATA_TYPE, 16, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);

-    int4 offset_y = min((int4)y_out + (int4)(0, 1, 2, 3), (int4)((int)DST_WIDTH - 1)) * (int4)dst_stride_y;
-    int4 offset_z = min((int4)z_out + (int4)(0, 1, 2, 3), (int4)((int)DST_HEIGHT - 1)) * (int4)dst_stride_z;
+    TILE(uint, 16, 1, dst_indirect_y) = { { 0 } };

-    // Store the output tile
-    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    out_col0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
-    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    out_col1_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col1, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
-    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    out_col2_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col2, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
-    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    out_col3_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col3, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
+    // Calculate the destination indirect Y
+    LOOP_UNROLLING(int, yk, 0, 4, 1)
+    {
+        LOOP_UNROLLING(int, xk, 0, 4, 1)
+        {
+            int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
+            int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
+            dst_indirect_y[xk + yk * 4].v = x_c + y_c * DST_WIDTH;
+            dst_indirect_y[xk + yk * 4].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
+        }
+    }

-    // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
-    // is overwritten with the valid one
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s3)) = out_col3_dt.s3;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s3)) = out_col2_dt.s3;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s3)) = out_col1_dt.s3;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s3)) = out_col0_dt.s3;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s2)) = out_col3_dt.s2;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s2)) = out_col2_dt.s2;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s2)) = out_col1_dt.s2;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s2)) = out_col0_dt.s2;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s1)) = out_col3_dt.s1;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s1)) = out_col2_dt.s1;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s1)) = out_col1_dt.s1;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s1)) = out_col0_dt.s1;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s3 + offset_z.s0)) = out_col3_dt.s0;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s2 + offset_z.s0)) = out_col2_dt.s0;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s0)) = out_col1_dt.s0;
-    *((__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s0)) = out_col0_dt.s0;
+    // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+    T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 16, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
 #endif // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
 }
 #endif // defined(VEC_SIZE) && VEC_SIZE == 4
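For reference, both the removed scalar code and the new tile-based code apply the same Winograd F(4x4, 5x5) output transformation out = A^T * M * A, where M is the 8x8 (or 8x1/1x8, in the 5x1/1x5 variants) tile loaded from src and A^T is the 4x8 matrix whose coefficients can be read off the expressions in the hunk above: rows {1,1,1,1,1,8,8,0}, {0,1,-1,2,-2,4,-4,0}, {0,1,1,4,4,2,2,0}, {0,1,-1,8,-8,1,-1,1}. The host-side C sketch below only restates that arithmetic for a single channel; it is not part of the patch, and the function name is illustrative.

/* Scalar reference for the F(4x4, 5x5) output transform: out = A^T * M * A.
 * Illustrative only; the coefficients mirror those used by the kernel above. */
#include <stdio.h>

static const float AT[4][8] = {
    { 1.f, 1.f,  1.f, 1.f,  1.f, 8.f,  8.f, 0.f },
    { 0.f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f, 0.f },
    { 0.f, 1.f,  1.f, 4.f,  4.f, 2.f,  2.f, 0.f },
    { 0.f, 1.f, -1.f, 8.f, -8.f, 1.f, -1.f, 1.f },
};

/* out(4x4) = A^T * m(8x8) * A, computed for one output channel. */
static void winograd_output_4x4_5x5_ref(const float m[8][8], float out[4][4])
{
    float tmp[4][8]; /* tmp = A^T * m */
    for (int r = 0; r < 4; ++r)
    {
        for (int c = 0; c < 8; ++c)
        {
            float acc = 0.f;
            for (int k = 0; k < 8; ++k)
            {
                acc += AT[r][k] * m[k][c];
            }
            tmp[r][c] = acc;
        }
    }
    /* out = tmp * A, i.e. out[r][c] = sum_k tmp[r][k] * AT[c][k] */
    for (int r = 0; r < 4; ++r)
    {
        for (int c = 0; c < 4; ++c)
        {
            float acc = 0.f;
            for (int k = 0; k < 8; ++k)
            {
                acc += tmp[r][k] * AT[c][k];
            }
            out[r][c] = acc;
        }
    }
}

int main(void)
{
    float m[8][8], out[4][4];
    for (int r = 0; r < 8; ++r)
    {
        for (int c = 0; c < 8; ++c)
        {
            m[r][c] = (float)(r == c); /* identity tile as a smoke test */
        }
    }
    winograd_output_4x4_5x5_ref(m, out);
    for (int r = 0; r < 4; ++r)
    {
        printf("%6.1f %6.1f %6.1f %6.1f\n", out[r][0], out[r][1], out[r][2], out[r][3]);
    }
    return 0;
}

With the identity tile the result is simply A^T * A, which makes it easy to cross-check the coefficients against the out00..out03 expressions removed above; the new LOOP_UNROLLING code factors the same sums and differences into tmp[0..5] and uses fma for the scaled terms.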
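The @note lines at the top of the first hunk list the compile-time options this kernel now expects. In the library these -D defines are assembled by the host-side kernel configuration code; the standalone sketch below only illustrates what a matching build-options string could look like, reusing the example values quoted in the @note lines (activation-related defines such as ACTIVATION_TYPE, A_VAL and B_VAL are also consumed by the kernel but are omitted here).

/* Illustrative only: example -D options matching the kernel documentation above.
 * The values are the examples quoted in the @note lines, not taken from real host code. */
#include <stdio.h>

int main(void)
{
    char build_opts[512];
    snprintf(build_opts, sizeof(build_opts),
             "-DDATA_TYPE=float -DVEC_SIZE=4 -DN0=%d "
             "-DNUM_TILES_X=%d -DOUTPUT_TILE_W=%d -DOUTPUT_TILE_H=%d "
             "-DSRC_HEIGHT=%d -DDST_WIDTH=%d -DDST_HEIGHT=%d -DHAS_BIAS",
             1, 16, 4, 4, 32, 24, 32);
    /* A string like this would be passed as the options argument of clBuildProgram(). */
    printf("%s\n", build_opts);
    return 0;
}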