path: root/src/core/CL/cl_kernels/winograd_output_transform.cl
author     Giorgio Arena <giorgio.arena@arm.com>   2021-04-08 16:41:51 +0100
committer  Sheri Zhang <sheri.zhang@arm.com>       2021-04-09 14:46:04 +0000
commit     83eee19e924df4034baa69307ffbf93f773ec041 (patch)
tree       00ecb57f5fba70ba377735d47c8ca0fb278a195a /src/core/CL/cl_kernels/winograd_output_transform.cl
parent     ec82b95f26198dc538f94aa90f0febfaf0eb2751 (diff)
download   ComputeLibrary-83eee19e924df4034baa69307ffbf93f773ec041.tar.gz
Winograd Output transform 7x7 rework
Resolve COMPMID-4140

Change-Id: I17db0ee596665598d08d4359a373160f21ab9acd
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5390
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/CL/cl_kernels/winograd_output_transform.cl')
-rw-r--r--  src/core/CL/cl_kernels/winograd_output_transform.cl | 315
1 file changed, 120 insertions, 195 deletions
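
For reference, the 2x2 output tile / 7x7 filter (F(2x2, 7x7)) output transform reworked below computes out = A^T * X * A over each 8x8 Winograd-domain tile X, with

    A^T = | 1  1  1  1  1  1  1  0 |
          | 0 -1  1 -2  2 -3  3  1 |

which is exactly the set of coefficients visible in the new kernel code. The plain-C sketch below is illustrative only and is not part of this patch; the function name and the row-major 8x8 input / 2x2 output layout are assumptions made for the example.

/* Scalar reference for the F(2x2, 7x7) Winograd output transform: out = A^T * X * A.
 * Illustrative only; not Compute Library code. */
void winograd_output_transform_2x2_7x7_ref(const float x[8][8], float out[2][2])
{
    float tmp[8][2]; /* A^T * X, kept as one 2-element column per input column */

    for (int c = 0; c < 8; ++c)
    {
        tmp[c][0] = x[0][c] + x[1][c] + x[2][c] + x[3][c] + x[4][c] + x[5][c] + x[6][c];
        tmp[c][1] = -x[1][c] + x[2][c] - 2.0f * x[3][c] + 2.0f * x[4][c] - 3.0f * x[5][c] + 3.0f * x[6][c] + x[7][c];
    }

    for (int r = 0; r < 2; ++r)
    {
        /* (A^T * X) * A: apply the same two rows of coefficients along the columns */
        out[r][0] = tmp[0][r] + tmp[1][r] + tmp[2][r] + tmp[3][r] + tmp[4][r] + tmp[5][r] + tmp[6][r];
        out[r][1] = -tmp[1][r] + tmp[2][r] - 2.0f * tmp[3][r] + 2.0f * tmp[4][r] - 3.0f * tmp[5][r] + 3.0f * tmp[6][r] + tmp[7][r];
    }
}

The 1D variants (2x1/7x1 and 1x2/1x7) apply only the first half of this computation to an 8x1 tile, which is the branch guarded by WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL / WINOGRAD_OUTPUT_TRANSFORM_VERTICAL in the kernel below.
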
diff --git a/src/core/CL/cl_kernels/winograd_output_transform.cl b/src/core/CL/cl_kernels/winograd_output_transform.cl
index 59402bf9b3..b050c56cde 100644
--- a/src/core/CL/cl_kernels/winograd_output_transform.cl
+++ b/src/core/CL/cl_kernels/winograd_output_transform.cl
@@ -177,22 +177,18 @@ __kernel void winograd_output_transform_2x2_3x3_nchw(
#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
}
-#define COMPUTE_TMP_COL_2x2_7x7(col, d0, d1, d2, d3, d4, d5, d6, d7) \
- ({ \
- col.s0 = d0 + d1 + d2 + d3 + d4 + d5 + d6; \
- col.s1 = -d1 + d2 - 2 * d3 + 2 * d4 + -3 * d5 + 3 * d6 + d7; \
- })
-
/** This OpenCL kernel performs Winograd output transform when the output tile is 2x2/2x1 or 1x2, the filter size 7x7/7x1 or 1x7 and the data layout is NHWC
*
* @note The number of tiles along the X direction must be passed at compile time using -DNUM_TILES_X: e.g. -DNUM_TILES_X=16
* @note The width of the output tile must be passed at compile time using -DOUTPUT_TILE_W: e.g. -DOUTPUT_TILE_W=2
* @note The height of the output tile must be passed at compile time using -DOUTPUT_TILE_H: e.g. -DOUTPUT_TILE_H=2
+ * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT: e.g. -DSRC_HEIGHT=32
* @note The width of the output tensor must be passed at compile time using -DDST_WIDTH: e.g. -DDST_WIDTH=24
* @note The height of the output tensor must be passed at compile time using -DDST_HEIGHT: e.g. -DDST_HEIGHT=32
* @note If this kernel is used to perform Winograd output transform 7x1, -DWINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL has to be passed at compile time
* @note If this kernel is used to perform Winograd output transform 1x7, -DWINOGRAD_OUTPUT_TRANSFORM_VERTICAL has to be passed at compile time
* @note The data type must be passed at compile time using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types: float/half.
+ * @note The number of output elements processed along the X direction must be passed at compile time using -DN0 e.g. -DN0=1
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: F32/F16
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -216,202 +212,131 @@ __kernel void winograd_output_transform_2x2_3x3_nchw(
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void winograd_output_transform_2x2_7x7_nhwc(
- TENSOR4D_DECLARATION(src),
- TENSOR4D_DECLARATION(dst),
+ TENSOR4D(src, BUFFER),
+ TENSOR4D(dst, BUFFER),
#if defined(HAS_BIAS)
VECTOR_DECLARATION(bias),
#endif // defined(HAS_BIAS)
int dst_size)
{
- // Each thread stores a 4x4/4x1 or 1x4 tile
-#if defined(SRC_DEPTH)
- Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
- const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
-#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
-#endif /* defined(SRC_DEPTH) */
+#define _ISRC_HEIGHT SRC_HEIGHT
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _INUM_TILES_X NUM_TILES_X
- int y_in = get_global_id(1);
- int x_out = get_global_id(0);
- int y_out = (y_in % NUM_TILES_X) * OUTPUT_TILE_W;
- int z_out = (y_in / NUM_TILES_X) * OUTPUT_TILE_H;
-#if defined(SRC_DEPTH)
- int batch = get_global_id(2) / SRC_DEPTH;
-#endif /* defined(SRC_DEPTH) */
+ const int cout = GET_SPATIAL_IDX(0, N0, 0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, 1, 0); // WINOGRAD OUTPUT TILES
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
- __global unsigned char *dst_base_ptr = dst_ptr + dst_offset_first_element_in_bytes + x_out * sizeof(DATA_TYPE);
+ int x_out = (mout % _INUM_TILES_X) * OUTPUT_TILE_W;
+ int y_out = (mout / _INUM_TILES_X) * OUTPUT_TILE_H;
-#if defined(SRC_DEPTH)
- dst_base_ptr += batch * dst_stride_w;
-#endif // defined(SRC_DEPTH)
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ TILE(DATA_TYPE, 8, N0, in);
+ TILE(DATA_TYPE, 2, N0, out);
+ TILE(uint, 8, 1, src_indirect_y);
- // Load the values across the channels to compose the input tile
- DATA_TYPE d00 = *((__global DATA_TYPE *)(src_addr + 0 * src_stride_z));
- DATA_TYPE d01 = *((__global DATA_TYPE *)(src_addr + 1 * src_stride_z));
- DATA_TYPE d02 = *((__global DATA_TYPE *)(src_addr + 2 * src_stride_z));
- DATA_TYPE d03 = *((__global DATA_TYPE *)(src_addr + 3 * src_stride_z));
- DATA_TYPE d04 = *((__global DATA_TYPE *)(src_addr + 4 * src_stride_z));
- DATA_TYPE d05 = *((__global DATA_TYPE *)(src_addr + 5 * src_stride_z));
- DATA_TYPE d06 = *((__global DATA_TYPE *)(src_addr + 6 * src_stride_z));
- DATA_TYPE d07 = *((__global DATA_TYPE *)(src_addr + 7 * src_stride_z));
+ // Calculate the indirect Y for the source tensor
+ LOOP_UNROLLING(int, i, 0, 8, 1)
+ {
+ src_indirect_y[i].v = mout + i * _ISRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(_ISRC_HEIGHT * 8);
+ }
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- // Compute out00, out01, out02 and out03
- float out00 = d00 + d01 + d02 + d03 + d04 + d05 + d06;
- float out01 = -d01 + d02 - 2.f * d03 + 2.0f * d04 - 3.0f * d05 + 3.0f * d06 + d07;
+ // Load the values across the 8 channels to compose the 8x1 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 8, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
+
+ // Compute out[0] and out[1]
+ out[0].v = in[0].v + in[1].v + in[2].v + in[3].v + in[4].v + in[5].v + in[6].v;
+ out[1].v = -in[1].v + in[2].v - 2.f * in[3].v + 2.0f * in[4].v - 3.0f * in[5].v + 3.0f * in[6].v + in[7].v;
#if defined(HAS_BIAS)
// Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
+ TILE(DATA_TYPE, 1, N0, b);
- float b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 0, b);
- out00 += (float)b;
- out01 += (float)b;
+ T_ADD_BROADCAST_X(DATA_TYPE, 2, N0, out, b, out);
#endif // defined(HAS_BIAS)
- // Store the output tile
-#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += y_out * dst_stride_y;
-
- int2 offset_z = min((int2)z_out + (int2)(0, 1), (int2)((int)DST_HEIGHT - 1)) * (int2)dst_stride_z;
+ T_ACTIVATION(DATA_TYPE, 2, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL, B_VAL);
+ TILE(uint, 2, 1, dst_indirect_y);
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *(__global DATA_TYPE *)(dst_base_ptr + offset_z.s1) = out0_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_z.s0) = out0_dt.s0;
+#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+ LOOP_UNROLLING(int, yk, 0, 2, 1)
+ {
+ int y_c = min(y_out + yk, ((int)_IDST_HEIGHT - 1));
+ dst_indirect_y[yk].v = x_out + y_c * (int)(_IDST_WIDTH);
+ }
#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- dst_base_ptr += z_out * dst_stride_z;
-
- int2 offset_y = min((int2)y_out + (int2)(0, 1), (int2)((int)DST_WIDTH - 1)) * (int2)dst_stride_y;
-
- VEC_DATA_TYPE(DATA_TYPE, 2)
- out0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT((VEC_DATA_TYPE(float, 2))(out00, out01), VEC_DATA_TYPE(DATA_TYPE, 2)), A_VAL,
- B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s1) = out0_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s0) = out0_dt.s0;
+ LOOP_UNROLLING(int, xk, 0, 2, 1)
+ {
+ int x_c = min(x_out + xk, ((int)_IDST_WIDTH - 1));
+ dst_indirect_y[xk].v = x_c + y_out * (int)(_IDST_WIDTH);
+ }
#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
-
- DATA_TYPE d10 = *((__global DATA_TYPE *)(src_addr + 8 * src_stride_z));
- DATA_TYPE d11 = *((__global DATA_TYPE *)(src_addr + 9 * src_stride_z));
- DATA_TYPE d12 = *((__global DATA_TYPE *)(src_addr + 10 * src_stride_z));
- DATA_TYPE d13 = *((__global DATA_TYPE *)(src_addr + 11 * src_stride_z));
- DATA_TYPE d14 = *((__global DATA_TYPE *)(src_addr + 12 * src_stride_z));
- DATA_TYPE d15 = *((__global DATA_TYPE *)(src_addr + 13 * src_stride_z));
- DATA_TYPE d16 = *((__global DATA_TYPE *)(src_addr + 14 * src_stride_z));
- DATA_TYPE d17 = *((__global DATA_TYPE *)(src_addr + 15 * src_stride_z));
-
- DATA_TYPE d20 = *((__global DATA_TYPE *)(src_addr + 16 * src_stride_z));
- DATA_TYPE d21 = *((__global DATA_TYPE *)(src_addr + 17 * src_stride_z));
- DATA_TYPE d22 = *((__global DATA_TYPE *)(src_addr + 18 * src_stride_z));
- DATA_TYPE d23 = *((__global DATA_TYPE *)(src_addr + 19 * src_stride_z));
- DATA_TYPE d24 = *((__global DATA_TYPE *)(src_addr + 20 * src_stride_z));
- DATA_TYPE d25 = *((__global DATA_TYPE *)(src_addr + 21 * src_stride_z));
- DATA_TYPE d26 = *((__global DATA_TYPE *)(src_addr + 22 * src_stride_z));
- DATA_TYPE d27 = *((__global DATA_TYPE *)(src_addr + 23 * src_stride_z));
-
- DATA_TYPE d30 = *((__global DATA_TYPE *)(src_addr + 24 * src_stride_z));
- DATA_TYPE d31 = *((__global DATA_TYPE *)(src_addr + 25 * src_stride_z));
- DATA_TYPE d32 = *((__global DATA_TYPE *)(src_addr + 26 * src_stride_z));
- DATA_TYPE d33 = *((__global DATA_TYPE *)(src_addr + 27 * src_stride_z));
- DATA_TYPE d34 = *((__global DATA_TYPE *)(src_addr + 28 * src_stride_z));
- DATA_TYPE d35 = *((__global DATA_TYPE *)(src_addr + 29 * src_stride_z));
- DATA_TYPE d36 = *((__global DATA_TYPE *)(src_addr + 30 * src_stride_z));
- DATA_TYPE d37 = *((__global DATA_TYPE *)(src_addr + 31 * src_stride_z));
-
- DATA_TYPE d40 = *((__global DATA_TYPE *)(src_addr + 32 * src_stride_z));
- DATA_TYPE d41 = *((__global DATA_TYPE *)(src_addr + 33 * src_stride_z));
- DATA_TYPE d42 = *((__global DATA_TYPE *)(src_addr + 34 * src_stride_z));
- DATA_TYPE d43 = *((__global DATA_TYPE *)(src_addr + 35 * src_stride_z));
- DATA_TYPE d44 = *((__global DATA_TYPE *)(src_addr + 36 * src_stride_z));
- DATA_TYPE d45 = *((__global DATA_TYPE *)(src_addr + 37 * src_stride_z));
- DATA_TYPE d46 = *((__global DATA_TYPE *)(src_addr + 38 * src_stride_z));
- DATA_TYPE d47 = *((__global DATA_TYPE *)(src_addr + 39 * src_stride_z));
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 2, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
- DATA_TYPE d50 = *((__global DATA_TYPE *)(src_addr + 40 * src_stride_z));
- DATA_TYPE d51 = *((__global DATA_TYPE *)(src_addr + 41 * src_stride_z));
- DATA_TYPE d52 = *((__global DATA_TYPE *)(src_addr + 42 * src_stride_z));
- DATA_TYPE d53 = *((__global DATA_TYPE *)(src_addr + 43 * src_stride_z));
- DATA_TYPE d54 = *((__global DATA_TYPE *)(src_addr + 44 * src_stride_z));
- DATA_TYPE d55 = *((__global DATA_TYPE *)(src_addr + 45 * src_stride_z));
- DATA_TYPE d56 = *((__global DATA_TYPE *)(src_addr + 46 * src_stride_z));
- DATA_TYPE d57 = *((__global DATA_TYPE *)(src_addr + 47 * src_stride_z));
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- DATA_TYPE d60 = *((__global DATA_TYPE *)(src_addr + 48 * src_stride_z));
- DATA_TYPE d61 = *((__global DATA_TYPE *)(src_addr + 49 * src_stride_z));
- DATA_TYPE d62 = *((__global DATA_TYPE *)(src_addr + 50 * src_stride_z));
- DATA_TYPE d63 = *((__global DATA_TYPE *)(src_addr + 51 * src_stride_z));
- DATA_TYPE d64 = *((__global DATA_TYPE *)(src_addr + 52 * src_stride_z));
- DATA_TYPE d65 = *((__global DATA_TYPE *)(src_addr + 53 * src_stride_z));
- DATA_TYPE d66 = *((__global DATA_TYPE *)(src_addr + 54 * src_stride_z));
- DATA_TYPE d67 = *((__global DATA_TYPE *)(src_addr + 55 * src_stride_z));
+ TILE(DATA_TYPE, 64, N0, in);
+ TILE(DATA_TYPE, 4, N0, out);
+ TILE(DATA_TYPE, 16, N0, tmp);
+ TILE(uint, 64, 1, src_indirect_y);
- DATA_TYPE d70 = *((__global DATA_TYPE *)(src_addr + 56 * src_stride_z));
- DATA_TYPE d71 = *((__global DATA_TYPE *)(src_addr + 57 * src_stride_z));
- DATA_TYPE d72 = *((__global DATA_TYPE *)(src_addr + 58 * src_stride_z));
- DATA_TYPE d73 = *((__global DATA_TYPE *)(src_addr + 59 * src_stride_z));
- DATA_TYPE d74 = *((__global DATA_TYPE *)(src_addr + 60 * src_stride_z));
- DATA_TYPE d75 = *((__global DATA_TYPE *)(src_addr + 61 * src_stride_z));
- DATA_TYPE d76 = *((__global DATA_TYPE *)(src_addr + 62 * src_stride_z));
- DATA_TYPE d77 = *((__global DATA_TYPE *)(src_addr + 63 * src_stride_z));
+ // Calculate the indirect Y for the source tensor
+ LOOP_UNROLLING(int, i, 0, 64, 1)
+ {
+ src_indirect_y[i].v = mout + i * _ISRC_HEIGHT;
+ src_indirect_y[i].v += bout * (int)(_ISRC_HEIGHT * 64);
+ }
- // Compute the 8x2 intermediate tensor
- VEC_DATA_TYPE(float, 2)
- tmp_col0, tmp_col1, tmp_col2, tmp_col3, tmp_col4, tmp_col5, tmp_col6, tmp_col7;
+ // Load the values across the 64 channels to compose the 8x8 tile
+ T_LOAD_INDIRECT(DATA_TYPE, 64, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col0, d00, d10, d20, d30, d40, d50, d60, d70);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col1, d01, d11, d21, d31, d41, d51, d61, d71);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col2, d02, d12, d22, d32, d42, d52, d62, d72);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col3, d03, d13, d23, d33, d43, d53, d63, d73);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col4, d04, d14, d24, d34, d44, d54, d64, d74);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col5, d05, d15, d25, d35, d45, d55, d65, d75);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col6, d06, d16, d26, d36, d46, d56, d66, d76);
- COMPUTE_TMP_COL_2x2_7x7(tmp_col7, d07, d17, d27, d37, d47, d57, d67, d77);
+ LOOP_UNROLLING(int, i, 0, 8, 1)
+ {
+ tmp[i * 2].v = in[0 + i].v + in[8 + i].v + in[16 + i].v + in[24 + i].v + in[32 + i].v + in[40 + i].v + in[48 + i].v;
+ tmp[i * 2 + 1].v = -in[8 + i].v + in[16 + i].v - 2 * in[24 + i].v + 2 * in[32 + i].v + -3 * in[40 + i].v + 3 * in[48 + i].v + in[56 + i].v;
+ }
// Compute the 2x2 output tile
- VEC_DATA_TYPE(float, 2)
- out_col0 = tmp_col0 + tmp_col1 + tmp_col2 + tmp_col3 + tmp_col4 + tmp_col5 + tmp_col6;
- VEC_DATA_TYPE(float, 2)
- out_col1 = -tmp_col1 + tmp_col2 - 2 * tmp_col3 + 2 * tmp_col4 - 3 * tmp_col5 + 3 * tmp_col6 + tmp_col7;
+ LOOP_UNROLLING(int, i, 0, 2, 1)
+ {
+ out[i * 2].v = tmp[0 + i].v + tmp[2 + i].v + tmp[4 + i].v + tmp[6 + i].v + tmp[8 + i].v + tmp[10 + i].v + tmp[12 + i].v;
+ out[i * 2 + 1].v = -tmp[2 + i].v + tmp[4 + i].v - 2 * tmp[6 + i].v + 2 * tmp[8 + i].v - 3 * tmp[10 + i].v + 3 * tmp[12 + i].v + tmp[14 + i].v;
+ }
#if defined(HAS_BIAS)
// Add bias
- Vector bias = CONVERT_TO_VECTOR_STRUCT_NO_STEP(bias);
-
- DATA_TYPE b = (float) * ((__global DATA_TYPE *)(vector_offset(&bias, x_out)));
+ TILE(DATA_TYPE, 1, N0, b);
- out_col0 += (VEC_DATA_TYPE(float, 2))b;
- out_col1 += (VEC_DATA_TYPE(float, 2))b;
+ T_LOAD(DATA_TYPE, 1, N0, BUFFER, bias, cout, 0, 0, b);
+ T_ADD_BROADCAST_X(DATA_TYPE, 4, N0, out, b, out);
#endif // defined(HAS_BIAS)
- int2 offset_y = min((int2)y_out + (int2)(0, 1), (int2)((int)DST_WIDTH - 1)) * (int2)dst_stride_y;
- int2 offset_z = min((int2)z_out + (int2)(0, 1), (int2)((int)DST_HEIGHT - 1)) * (int2)dst_stride_z;
+ T_ACTIVATION(DATA_TYPE, 4, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
- // Store the output tile
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col0_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col0, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- out_col1_dt = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, CONVERT(out_col1, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)), A_VAL, B_VAL);
-
- // To avoid the out-of-bound write, we store the elements in reverse order so the invalid element
- // is overwritten with the valid one
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s1) = out_col1_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s1 + offset_z.s0) = out_col1_dt.s0;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s1) = out_col0_dt.s1;
- *(__global DATA_TYPE *)(dst_base_ptr + offset_y.s0 + offset_z.s0) = out_col0_dt.s0;
+ TILE(uint, 4, 1, dst_indirect_y);
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, yk, 0, 2, 1)
+ {
+ LOOP_UNROLLING(int, xk, 0, 2, 1)
+ {
+ int x_c = min(x_out + xk, ((int)_IDST_WIDTH - 1));
+ int y_c = min(y_out + yk, ((int)_IDST_HEIGHT - 1));
+ dst_indirect_y[xk + yk * 2].v = x_c + y_c * _IDST_WIDTH;
+ dst_indirect_y[xk + yk * 2].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ }
+ }
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 4, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
#endif // !defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) && !defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
}
#endif // defined(VEC_SIZE) && VEC_SIZE == 2
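
The main change in the hunk above is that the per-channel pointer arithmetic (one scalar load per d00..d77) is replaced by indirect-Y addressing: the kernel first builds src_indirect_y / dst_indirect_y tables of row indices and then lets T_LOAD_INDIRECT / T_STORE_INDIRECT_WIDTH_SELECT gather and scatter N0 output channels per row. The host-style C sketch below (assumed helper names, not Compute Library code) mirrors the index computations the kernel performs for the 2D 2x2/7x7 case; destination rows are clamped to the last valid column/row, so edge tiles overwrite a valid element instead of writing out of bounds, and the tile can be stored unconditionally.

#include <stddef.h>

/* Row of the i-th Winograd-domain channel (i in [0, 64)) of output tile 'mout' in batch 'bout'.
 * Mirrors: src_indirect_y[i] = mout + i * SRC_HEIGHT + bout * SRC_HEIGHT * 64 */
size_t src_row_2x2_7x7(int mout, int bout, int i, int src_height)
{
    return (size_t)mout + (size_t)i * (size_t)src_height + (size_t)bout * (size_t)src_height * 64u;
}

/* Destination row of element (xk, yk) of the 2x2 tile anchored at (x_out, y_out).
 * Mirrors: dst_indirect_y[xk + yk * 2] = x_c + y_c * DST_WIDTH + bout * DST_WIDTH * DST_HEIGHT */
size_t dst_row_2x2_7x7(int x_out, int y_out, int xk, int yk, int bout, int dst_width, int dst_height)
{
    const int x_c = (x_out + xk < dst_width) ? x_out + xk : dst_width - 1;    /* clamp to last column */
    const int y_c = (y_out + yk < dst_height) ? y_out + yk : dst_height - 1;  /* clamp to last row */
    return (size_t)x_c + (size_t)y_c * (size_t)dst_width + (size_t)bout * (size_t)dst_width * (size_t)dst_height;
}
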
@@ -461,8 +386,8 @@ __kernel void winograd_output_transform_4x4_3x3_nchw(
Tensor4D src = CONVERT_TO_TENSOR4D_STRUCT(src, SRC_DEPTH);
const __global uchar *src_addr = tensor4D_offset(&src, 0, 0, 0, 0);
#else /* defined(SRC_DEPTH) */
- Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
- const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
+ Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
+ const __global uchar *src_addr = tensor3D_offset(&src, 0, 0, 0);
#endif /* defined(SRC_DEPTH) */
// Load the values across the channels to compose the 6x6 or 6x1 tile
@@ -690,9 +615,9 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
#if defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- TILE(DATA_TYPE, 6, N0, in) = {{ { 0 } }};
- TILE(DATA_TYPE, 4, N0, out) = {{ { 0 } }};
- TILE(uint, 6, 1, src_indirect_y) = {{ { 0 } }};
+ TILE(DATA_TYPE, 6, N0, in) = { { { 0 } } };
+ TILE(DATA_TYPE, 4, N0, out) = { { { 0 } } };
+ TILE(uint, 6, 1, src_indirect_y) = { { { 0 } } };
LOOP_UNROLLING(int, i, 0, 6, 1)
{
@@ -723,7 +648,7 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
T_ACTIVATION(DATA_TYPE, 4, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
- TILE(uint, 4, 1, dst_indirect_y) = {{ { 0 } }};
+ TILE(uint, 4, 1, dst_indirect_y) = { { { 0 } } };
// Calculate the destination indirect Y
#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
@@ -733,7 +658,7 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
dst_indirect_y[yk].v = x_out + y_c * DST_WIDTH;
dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
}
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
LOOP_UNROLLING(int, xk, 0, 4, 1)
{
int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
@@ -748,9 +673,9 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
#else // defined(WINOGRAD_OUTPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
// Calculate the indirect Y for the source tensor
- TILE(DATA_TYPE, 36, N0, in) = {{ { 0 } }};
- TILE(DATA_TYPE, 4, N0, tmp) = {{ { 0 } }};
- TILE(uint, 36, 1, src_indirect_y) = {{ { 0 } }};
+ TILE(DATA_TYPE, 36, N0, in) = { { { 0 } } };
+ TILE(DATA_TYPE, 4, N0, tmp) = { { { 0 } } };
+ TILE(uint, 36, 1, src_indirect_y) = { { { 0 } } };
LOOP_UNROLLING(int, i, 0, 36, 1)
{
@@ -775,7 +700,7 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
}
// Compute the output tile
- TILE(DATA_TYPE, 16, N0, out) = {{ { 0 } }};
+ TILE(DATA_TYPE, 16, N0, out) = { { { 0 } } };
LOOP_UNROLLING(int, i, 0, 4, 1)
{
@@ -804,7 +729,7 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
T_ACTIVATION(DATA_TYPE, 16, N0, ACTIVATION_TYPE, A_VAL, B_VAL, out, out);
- TILE(uint, 16, 1, dst_indirect_y) = {{ { 0 } }};
+ TILE(uint, 16, 1, dst_indirect_y) = { { { 0 } } };
// Calculate the destination indirect Y
LOOP_UNROLLING(int, yk, 0, 4, 1)
@@ -1105,7 +1030,7 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
* @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
- __kernel void winograd_output_transform_4x4_5x5_nhwc(
+__kernel void winograd_output_transform_4x4_5x5_nhwc(
TENSOR4D(src, BUFFER),
TENSOR4D(dst, BUFFER),
#if defined(HAS_BIAS)
@@ -1138,7 +1063,7 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
tmp[2].v = 2.0f * (in[5].v + in[6].v);
tmp[3].v = in[3].v + in[4].v;
out[0].v = in[0].v + in[1].v + in[2].v + tmp[3].v + 4.0f * tmp[2].v;
- out[1].v = tmp[0].v + tmp[1].v + 4.0f * (in[5].v - in[6].v) ;
+ out[1].v = tmp[0].v + tmp[1].v + 4.0f * (in[5].v - in[6].v);
out[2].v = in[1].v + in[2].v + 4.0f * tmp[3].v + tmp[2].v;
out[3].v = tmp[0].v + 4.0f * tmp[1].v + in[5].v - in[6].v + in[7].v;
@@ -1166,7 +1091,7 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
dst_indirect_y[yk].v = x_out + y_c * DST_WIDTH;
dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
}
-#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
+#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
LOOP_UNROLLING(int, xk, 0, 4, 1)
{
int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
@@ -1196,14 +1121,14 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
// A^T * in
LOOP_UNROLLING(int, i, 0, 8, 1)
{
- tmp[0].v = in[8 + i].v + in[16 + i].v;
- tmp[1].v = in[8 + i].v - in[16 + i].v;
- tmp[2].v = in[24 + i].v + in[32 + i].v;
- tmp[3].v = in[24 + i].v - in[32 + i].v;
- tmp[3].v = tmp[3].v + tmp[3].v;
- tmp[4].v = in[40 + i].v + in[48 + i].v;
- tmp[4].v = tmp[4].v + tmp[4].v;
- tmp[5].v = in[40 + i].v - in[48 + i].v;
+ tmp[0].v = in[8 + i].v + in[16 + i].v;
+ tmp[1].v = in[8 + i].v - in[16 + i].v;
+ tmp[2].v = in[24 + i].v + in[32 + i].v;
+ tmp[3].v = in[24 + i].v - in[32 + i].v;
+ tmp[3].v = tmp[3].v + tmp[3].v;
+ tmp[4].v = in[40 + i].v + in[48 + i].v;
+ tmp[4].v = tmp[4].v + tmp[4].v;
+ tmp[5].v = in[40 + i].v - in[48 + i].v;
// 4x8 matrix as a result
in[i].v = in[i].v + tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[4].v, tmp[2].v);
@@ -1218,14 +1143,14 @@ __kernel void winograd_output_transform_4x4_5x5_nchw(
// in * A, with in = A^T * in as above
LOOP_UNROLLING(int, i, 0, 4, 1)
{
- tmp[0].v = in[8 * i + 1].v + in[8 * i + 2].v;
- tmp[1].v = in[8 * i + 1].v - in[8 * i + 2].v;
- tmp[2].v = in[8 * i + 3].v + in[8 * i + 4].v;
- tmp[3].v = in[8 * i + 3].v - in[8 * i + 4].v;
- tmp[3].v = tmp[3].v + tmp[3].v;
- tmp[4].v = in[8 * i + 5].v + in[8 * i + 6].v;
- tmp[4].v = tmp[4].v + tmp[4].v;
- tmp[5].v = in[8 * i + 5].v - in[8 * i + 6].v;
+ tmp[0].v = in[8 * i + 1].v + in[8 * i + 2].v;
+ tmp[1].v = in[8 * i + 1].v - in[8 * i + 2].v;
+ tmp[2].v = in[8 * i + 3].v + in[8 * i + 4].v;
+ tmp[3].v = in[8 * i + 3].v - in[8 * i + 4].v;
+ tmp[3].v = tmp[3].v + tmp[3].v;
+ tmp[4].v = in[8 * i + 5].v + in[8 * i + 6].v;
+ tmp[4].v = tmp[4].v + tmp[4].v;
+ tmp[5].v = in[8 * i + 5].v - in[8 * i + 6].v;
// 4x4 tile
out[4 * i].v = in[8 * i].v + tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[4].v, tmp[2].v);