author    Giorgio Arena <giorgio.arena@arm.com>    2021-05-13 16:58:51 +0100
committer Giorgio Arena <giorgio.arena@arm.com>    2021-05-17 12:08:08 +0000
commit    bdd16d1c4832ed416f24908b2c1d060aa4e42f32 (patch)
tree      58e9fa3ebeca7a1bfa0cca23481f61ed30b4fb08
parent    72ee9b4723485c3da077d765febf45f27acb78cb (diff)
Add macro to manually unroll loops in OpenCL
Change-Id: I092d10534816f5b3717325952033c351b8231380
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5643
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
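In short, the patch replaces the old LOOP_UNROLLING(DATA_TYPE, VAR, START_IDX, NUM_ITERATIONS, STEP) helper, which emitted a _Pragma("unroll") for-loop and relied on the compiler to unroll it, with a family of recursive macros (LOOP_UNROLLING_1 .. LOOP_UNROLLING_128) that duplicate the loop body at preprocessing time. Callers now pass the step before the iteration count and supply the body as the final macro argument, closing the call with "})" instead of a plain "}". A minimal usage sketch, paraphrasing the accumulator-initialisation hunk from direct_convolution.cl below:

    // Old convention: the macro expands to "#pragma unroll" + for(...), body follows in braces
    LOOP_UNROLLING(int, i, 0, M0, 1)
    {
        c[i].v = 0;
    }

    // New convention: LOOP_UNROLLING(type, idx, start, step, num, macro); the body is the
    // last argument and is repeated "num" times by the preprocessor (num must be <= 128)
    LOOP_UNROLLING(int, i, 0, 1, M0,
    {
        c[i].v = 0;
    })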
-rw-r--r--  src/core/CL/cl_kernels/direct_convolution.cl          24
-rw-r--r--  src/core/CL/cl_kernels/tile_helpers.h                 401
-rw-r--r--  src/core/CL/cl_kernels/winograd_input_transform.cl    80
-rw-r--r--  src/core/CL/cl_kernels/winograd_output_transform.cl   120
4 files changed, 382 insertions, 243 deletions
diff --git a/src/core/CL/cl_kernels/direct_convolution.cl b/src/core/CL/cl_kernels/direct_convolution.cl
index a9a997f9ac..e303d2067d 100644
--- a/src/core/CL/cl_kernels/direct_convolution.cl
+++ b/src/core/CL/cl_kernels/direct_convolution.cl
@@ -141,21 +141,21 @@ __kernel void direct_convolution_nhwc(
TILE(int, M0, 1, yi);
// Convert the linear index to coordinate
- LOOP_UNROLLING(int, i, 0, M0, 1)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
{
xi[i].v = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
yi[i].v = ((mout + i) / _IDST_WIDTH) * STRIDE_Y;
xi[i].v -= PAD_LEFT;
yi[i].v -= PAD_TOP;
- }
+ })
// Initialize the accumulators
TILE(ACC_DATA_TYPE, M0, N0, c);
- LOOP_UNROLLING(int, i, 0, M0, 1)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
{
c[i].v = 0;
- }
+ })
for(int i = 0; i < (_IWEI_WIDTH * _IWEI_HEIGHT); ++i)
{
@@ -169,13 +169,13 @@ __kernel void direct_convolution_nhwc(
TILE(SRC_DATA_TYPE, M0, K0, a);
TILE(WEI_DATA_TYPE, N0, K0, b);
- LOOP_UNROLLING(int, i, 0, M0, 1)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
{
a[i].v = ZERO_VALUE;
- }
+ })
// Load tile from the src tensor
- T_LOAD_NHWC_INDIRECT(SRC_DATA_TYPE, 1, M0, K0, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, xi, yi, a);
+ T_LOAD_NHWC_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, xi, yi, a);
// Load tile from the weights tensor
T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
@@ -199,13 +199,13 @@ __kernel void direct_convolution_nhwc(
TILE(SRC_DATA_TYPE, M0, 1, a);
TILE(WEI_DATA_TYPE, N0, 1, b);
- LOOP_UNROLLING(int, i, 0, M0, 1)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
{
a[i].v = ZERO_VALUE;
- }
+ })
// Load tile from the src tensor
- T_LOAD_NHWC_INDIRECT(SRC_DATA_TYPE, 1, M0, 1, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, xi, yi, a);
+ T_LOAD_NHWC_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, xi, yi, a);
// Load tile from the weights tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
@@ -240,11 +240,11 @@ __kernel void direct_convolution_nhwc(
TILE(uint, M0, 1, dst_indirect_y);
// Calculate the destination indirect Y
- LOOP_UNROLLING(int, i, 0, M0, 1)
+ LOOP_UNROLLING(int, i, 0, 1, M0,
{
dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
- }
+ })
bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 3d37f0d31f..4959c04448 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -22,6 +22,9 @@
* SOFTWARE.
*/
+// *INDENT-OFF*
+// clang-format off
+
/** Tile object
* A tile object is a 2D memory block and can be accessed using the following syntax:
* -# a[m0].v = access the the vector at row "m0" (OpenCL vector)
@@ -39,21 +42,21 @@
DATA_TYPE##W v; \
} BASENAME[H]
-#define TENSOR4D_IMAGE(name) \
- __read_only image2d_t name##_img, \
- __global uchar *name##_ptr, \
- uint name##_stride_x, \
- uint name##_step_x, \
- uint name##_stride_y, \
- uint name##_step_y, \
- uint name##_stride_z, \
- uint name##_step_z, \
- uint name##_stride_w, \
- uint name##_step_w, \
+#define TENSOR4D_IMAGE(name) \
+ __read_only image2d_t name##_img, \
+ __global uchar *name##_ptr, \
+ uint name##_stride_x, \
+ uint name##_step_x, \
+ uint name##_stride_y, \
+ uint name##_step_y, \
+ uint name##_stride_z, \
+ uint name##_step_z, \
+ uint name##_stride_w, \
+ uint name##_step_w, \
uint name##_offset_first_element_in_bytes
-#define TENSOR4D_BUFFER(name) \
- __global uchar *name##_ptr, \
+#define TENSOR4D_BUFFER(name) \
+ __global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
@@ -67,9 +70,143 @@
#define TENSOR4D_STR(name, type) TENSOR4D_##type(name)
#define TENSOR4D(name, type) TENSOR4D_STR(name, type)
-/** Loop unrolling */
-#define LOOP_UNROLLING(DATA_TYPE, VAR, START_IDX, NUM_ITERATIONS, STEP) \
- _Pragma("unroll") for(DATA_TYPE VAR = START_IDX; VAR < NUM_ITERATIONS; VAR += STEP)
+#define UNROLL_INCR(idx, step, macro) idx += (step); (macro)
+
+#define LOOP_UNROLLING_1(idx, step, macro) (macro)
+#define LOOP_UNROLLING_2(idx, step, macro) LOOP_UNROLLING_1(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_3(idx, step, macro) LOOP_UNROLLING_2(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_4(idx, step, macro) LOOP_UNROLLING_3(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_5(idx, step, macro) LOOP_UNROLLING_4(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_6(idx, step, macro) LOOP_UNROLLING_5(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_7(idx, step, macro) LOOP_UNROLLING_6(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_8(idx, step, macro) LOOP_UNROLLING_7(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_9(idx, step, macro) LOOP_UNROLLING_8(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_10(idx, step, macro) LOOP_UNROLLING_9(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_11(idx, step, macro) LOOP_UNROLLING_10(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_12(idx, step, macro) LOOP_UNROLLING_11(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_13(idx, step, macro) LOOP_UNROLLING_12(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_14(idx, step, macro) LOOP_UNROLLING_13(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_15(idx, step, macro) LOOP_UNROLLING_14(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_16(idx, step, macro) LOOP_UNROLLING_15(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_17(idx, step, macro) LOOP_UNROLLING_16(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_18(idx, step, macro) LOOP_UNROLLING_17(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_19(idx, step, macro) LOOP_UNROLLING_18(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_20(idx, step, macro) LOOP_UNROLLING_19(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_21(idx, step, macro) LOOP_UNROLLING_20(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_22(idx, step, macro) LOOP_UNROLLING_21(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_23(idx, step, macro) LOOP_UNROLLING_22(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_24(idx, step, macro) LOOP_UNROLLING_23(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_25(idx, step, macro) LOOP_UNROLLING_24(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_26(idx, step, macro) LOOP_UNROLLING_25(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_27(idx, step, macro) LOOP_UNROLLING_26(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_28(idx, step, macro) LOOP_UNROLLING_27(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_29(idx, step, macro) LOOP_UNROLLING_28(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_30(idx, step, macro) LOOP_UNROLLING_29(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_31(idx, step, macro) LOOP_UNROLLING_30(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_32(idx, step, macro) LOOP_UNROLLING_31(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_33(idx, step, macro) LOOP_UNROLLING_32(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_34(idx, step, macro) LOOP_UNROLLING_33(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_35(idx, step, macro) LOOP_UNROLLING_34(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_36(idx, step, macro) LOOP_UNROLLING_35(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_37(idx, step, macro) LOOP_UNROLLING_36(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_38(idx, step, macro) LOOP_UNROLLING_37(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_39(idx, step, macro) LOOP_UNROLLING_38(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_40(idx, step, macro) LOOP_UNROLLING_39(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_41(idx, step, macro) LOOP_UNROLLING_40(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_42(idx, step, macro) LOOP_UNROLLING_41(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_43(idx, step, macro) LOOP_UNROLLING_42(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_44(idx, step, macro) LOOP_UNROLLING_43(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_45(idx, step, macro) LOOP_UNROLLING_44(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_46(idx, step, macro) LOOP_UNROLLING_45(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_47(idx, step, macro) LOOP_UNROLLING_46(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_48(idx, step, macro) LOOP_UNROLLING_47(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_49(idx, step, macro) LOOP_UNROLLING_48(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_50(idx, step, macro) LOOP_UNROLLING_49(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_51(idx, step, macro) LOOP_UNROLLING_50(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_52(idx, step, macro) LOOP_UNROLLING_51(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_53(idx, step, macro) LOOP_UNROLLING_52(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_54(idx, step, macro) LOOP_UNROLLING_53(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_55(idx, step, macro) LOOP_UNROLLING_54(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_56(idx, step, macro) LOOP_UNROLLING_55(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_57(idx, step, macro) LOOP_UNROLLING_56(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_58(idx, step, macro) LOOP_UNROLLING_57(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_59(idx, step, macro) LOOP_UNROLLING_58(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_60(idx, step, macro) LOOP_UNROLLING_59(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_61(idx, step, macro) LOOP_UNROLLING_60(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_62(idx, step, macro) LOOP_UNROLLING_61(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_63(idx, step, macro) LOOP_UNROLLING_62(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_64(idx, step, macro) LOOP_UNROLLING_63(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_65(idx, step, macro) LOOP_UNROLLING_64(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_66(idx, step, macro) LOOP_UNROLLING_65(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_67(idx, step, macro) LOOP_UNROLLING_66(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_68(idx, step, macro) LOOP_UNROLLING_67(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_69(idx, step, macro) LOOP_UNROLLING_68(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_70(idx, step, macro) LOOP_UNROLLING_69(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_71(idx, step, macro) LOOP_UNROLLING_70(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_72(idx, step, macro) LOOP_UNROLLING_71(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_73(idx, step, macro) LOOP_UNROLLING_72(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_74(idx, step, macro) LOOP_UNROLLING_73(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_75(idx, step, macro) LOOP_UNROLLING_74(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_76(idx, step, macro) LOOP_UNROLLING_75(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_77(idx, step, macro) LOOP_UNROLLING_76(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_78(idx, step, macro) LOOP_UNROLLING_77(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_79(idx, step, macro) LOOP_UNROLLING_78(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_80(idx, step, macro) LOOP_UNROLLING_79(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_81(idx, step, macro) LOOP_UNROLLING_80(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_82(idx, step, macro) LOOP_UNROLLING_81(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_83(idx, step, macro) LOOP_UNROLLING_82(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_84(idx, step, macro) LOOP_UNROLLING_83(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_85(idx, step, macro) LOOP_UNROLLING_84(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_86(idx, step, macro) LOOP_UNROLLING_85(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_87(idx, step, macro) LOOP_UNROLLING_86(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_88(idx, step, macro) LOOP_UNROLLING_87(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_89(idx, step, macro) LOOP_UNROLLING_88(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_90(idx, step, macro) LOOP_UNROLLING_89(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_91(idx, step, macro) LOOP_UNROLLING_90(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_92(idx, step, macro) LOOP_UNROLLING_91(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_93(idx, step, macro) LOOP_UNROLLING_92(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_94(idx, step, macro) LOOP_UNROLLING_93(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_95(idx, step, macro) LOOP_UNROLLING_94(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_96(idx, step, macro) LOOP_UNROLLING_95(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_97(idx, step, macro) LOOP_UNROLLING_96(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_98(idx, step, macro) LOOP_UNROLLING_97(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_99(idx, step, macro) LOOP_UNROLLING_98(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_100(idx, step, macro) LOOP_UNROLLING_99(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_101(idx, step, macro) LOOP_UNROLLING_100(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_102(idx, step, macro) LOOP_UNROLLING_101(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_103(idx, step, macro) LOOP_UNROLLING_102(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_104(idx, step, macro) LOOP_UNROLLING_103(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_105(idx, step, macro) LOOP_UNROLLING_104(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_106(idx, step, macro) LOOP_UNROLLING_105(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_107(idx, step, macro) LOOP_UNROLLING_106(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_108(idx, step, macro) LOOP_UNROLLING_107(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_109(idx, step, macro) LOOP_UNROLLING_108(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_110(idx, step, macro) LOOP_UNROLLING_109(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_111(idx, step, macro) LOOP_UNROLLING_110(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_112(idx, step, macro) LOOP_UNROLLING_111(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_113(idx, step, macro) LOOP_UNROLLING_112(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_114(idx, step, macro) LOOP_UNROLLING_113(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_115(idx, step, macro) LOOP_UNROLLING_114(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_116(idx, step, macro) LOOP_UNROLLING_115(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_117(idx, step, macro) LOOP_UNROLLING_116(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_118(idx, step, macro) LOOP_UNROLLING_117(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_119(idx, step, macro) LOOP_UNROLLING_118(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_120(idx, step, macro) LOOP_UNROLLING_119(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_121(idx, step, macro) LOOP_UNROLLING_120(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_122(idx, step, macro) LOOP_UNROLLING_121(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_123(idx, step, macro) LOOP_UNROLLING_122(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_124(idx, step, macro) LOOP_UNROLLING_123(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_125(idx, step, macro) LOOP_UNROLLING_124(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_126(idx, step, macro) LOOP_UNROLLING_125(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_127(idx, step, macro) LOOP_UNROLLING_126(idx, step, macro); UNROLL_INCR(idx, step, macro)
+#define LOOP_UNROLLING_128(idx, step, macro) LOOP_UNROLLING_127(idx, step, macro); UNROLL_INCR(idx, step, macro)
+
+#define LOOP_UNROLLING(type, idx, start, step, num, macro) LOOP_UNROLLING_STR(type, idx, start, step, num, macro)
+#define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \
+ { \
+ type idx = start; \
+ LOOP_UNROLLING_##num(idx, step, macro); \
+ }
/** Get the get_global_id with partial N0. This function is useful when the dimension is not multiple of N0 and we need to use a partial N0
* to avoid out-of-bound read/write
@@ -122,15 +259,15 @@
val += (DST_DATA_TYPE)x.s3 * (DST_DATA_TYPE)y.s3; \
})
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
-#define DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, a, b, c) \
- ({ \
- DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, (a.lo), (b.lo), c); \
- DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, (a.hi), (b.hi), c); \
+#define DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, (a.lo), (b.lo), c); \
+ DOT_PRODUCT4_INTEGER8(DST_DATA_TYPE, (a.hi), (b.hi), c); \
})
-#define DOT_PRODUCT16_INTEGER8(DST_DATA_TYPE, a, b, c) \
- ({ \
- DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, (a.lo), (b.lo), c); \
- DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, (a.hi), (b.hi), c); \
+#define DOT_PRODUCT16_INTEGER8(DST_DATA_TYPE, a, b, c) \
+ ({ \
+ DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, (a.lo), (b.lo), c); \
+ DOT_PRODUCT8_INTEGER8(DST_DATA_TYPE, (a.hi), (b.hi), c); \
})
/** Load a vector from global memory (tensor)
@@ -168,11 +305,11 @@
* @param[out] dst Output tile
*/
#define T_LOAD(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \
- ({ \
- LOOP_UNROLLING(int, _i, 0, HEIGHT, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \
- } \
+ }) \
})
/** Load a tile from global memory (tensor) using an indirect Y index tile
@@ -189,11 +326,11 @@
* @param[out] dst Output tile
*/
#define T_LOAD_INDIRECT(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, STRIDE_Y, indirect_y, dst) \
- ({ \
- LOOP_UNROLLING(int, _i, 0, HEIGHT, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
+ { \
dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, (indirect_y[_i].v), STRIDE_Y); \
- } \
+ }) \
})
/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout
@@ -215,27 +352,26 @@
* @param[out] dst Output tile
*/
#define T_LOAD_NHWC(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, dst) \
- ({ \
- LOOP_UNROLLING(int, _yk, 0, (TILE_HEIGHT), 1) \
- { \
- LOOP_UNROLLING(int, _xk, 0, (TILE_WIDTH), 1) \
- { \
- int _src_y = (X) + _xk + ((Y) + _yk) * (TENSOR_WIDTH); \
- _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
+ ({ \
+ LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \
+ { \
+ LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \
+ { \
+ int _src_y = (X) + _xk + ((Y) + _yk) * (TENSOR_WIDTH); \
+ _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
int _src_valid_y = (((X) + _xk) >= 0 && ((X) + _xk) < (int)(TENSOR_WIDTH) && ((Y) + _yk) >= 0 && ((Y) + _yk) < (int)(TENSOR_HEIGHT)); \
- if(_src_valid_y != 0) \
- { \
- dst[_xk + _yk * (TILE_WIDTH)].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
+ if(_src_valid_y != 0) \
+ { \
+ dst[_xk + _yk * (TILE_WIDTH)].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
} \
- } \
- } \
+ }) \
+ }) \
})
/** Load a tile from global memory (tensor) when the tensor is stored using a NHWC layout using indirect X and Y coordinates
*
* @param[in] DATA_TYPE Data type
- * @param[in] TILE_HEIGHT Number of elements to load from Y (height) dimension
- * @param[in] TILE_WIDTH Number of elements to load from X (width) dimension
+ * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension
* @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
* @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently BUFFER only is supported
* In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
@@ -251,18 +387,18 @@
* @param[out] yi A tile with (TILE_WIDTH x TILE_HEIGHT) values with the indirect Y coordinate
* @param[out] dst Output tile
*/
-#define T_LOAD_NHWC_INDIRECT(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, xi, yi, dst) \
- ({ \
- LOOP_UNROLLING(int, _i, 0, (TILE_WIDTH * TILE_HEIGHT), 1) \
- { \
- int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH); \
- _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
+#define T_LOAD_NHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, xi, yi, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
+ { \
+ int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH); \
+ _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \
int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT)); \
- if(_src_valid_y != 0) \
- { \
- dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
+ if(_src_valid_y != 0) \
+ { \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \
} \
- } \
+ }) \
})
/** Store a tile to global memory (tensor) using an indirect Y index tile and conditionally use a different length for the store
@@ -287,19 +423,19 @@
({ \
if(WIDTH1_CONDITION) \
{ \
- LOOP_UNROLLING(int, _i, 0, HEIGHT, 1) \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
{ \
VSTORE_PARTIAL(WIDTH0, WIDTH1) \
(src[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
- } \
+ }) \
} \
else \
{ \
- LOOP_UNROLLING(int, _i, 0, HEIGHT, 1) \
+ LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \
{ \
VSTORE(WIDTH0) \
(src[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \
- } \
+ }) \
} \
})
@@ -315,24 +451,24 @@
* @param[in] rhs RHS tile
* @param[out] dst DST tile
*/
-#define T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, lhs, rhs, dst) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, M0, 1) \
- { \
- ACC_DATA_TYPE _tm = 0; \
- LOOP_UNROLLING(int, _k0, 0, K0, 1) \
- { \
- _tm += ((ACC_DATA_TYPE)lhs[_m0].s[_k0] * (ACC_DATA_TYPE)WEI_OFFSET); \
- } \
- LOOP_UNROLLING(int, _n0, 0, N0, 1) \
- { \
- dst[_m0].s[_n0] += _tm; \
- LOOP_UNROLLING(int, _k0, 0, K0, 1) \
- { \
+#define T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, lhs, rhs, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ ACC_DATA_TYPE _tm = 0; \
+ LOOP_UNROLLING(int, _k0, 0, 1, K0, \
+ { \
+ _tm += ((ACC_DATA_TYPE)lhs[_m0].s[_k0] * (ACC_DATA_TYPE)WEI_OFFSET); \
+ }) \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ dst[_m0].s[_n0] += _tm; \
+ LOOP_UNROLLING(int, _k0, 0, 1, K0, \
+ { \
dst[_m0].s[_n0] += ((ACC_DATA_TYPE)rhs[_n0].s[_k0] * (ACC_DATA_TYPE)SRC_OFFSET); \
- } \
- } \
- } \
+ }) \
+ }) \
+ }); \
})
/** Quantized the tile (ASYMMETRIC) with fixed-point scale
@@ -347,25 +483,25 @@
* @param[in] src Input tile
* @param[out] dst Output tile
*/
-#define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, M0, 1) \
- { \
- LOOP_UNROLLING(int, _n0, 0, N0, 1) \
- { \
- SRC_DATA_TYPE _tmp = 0; \
- if(DST_SHIFT < 0) \
- { \
+#define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
+ SRC_DATA_TYPE _tmp = 0; \
+ if(DST_SHIFT < 0) \
+ { \
_tmp = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(src[_m0].s[_n0], DST_MULTIPLIER, DST_SHIFT, 1); \
- } \
- else \
- { \
- _tmp = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(src[_m0].s[_n0], DST_MULTIPLIER, DST_SHIFT, 1); \
- } \
- _tmp += DST_OFFSET; \
- dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
- } \
- } \
+ } \
+ else \
+ { \
+ _tmp = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(src[_m0].s[_n0], DST_MULTIPLIER, DST_SHIFT, 1); \
+ } \
+ _tmp += DST_OFFSET; \
+ dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \
+ }) \
+ }) \
})
/** Conditional rowset (memset by row)
@@ -380,14 +516,14 @@
* @param[out] mask Mask to check for setting the row to VALUE_TO_SET
*/
#define T_ROWSET_MASK(DATA_TYPE, M0, N0, VALUE_TO_SET, a, mask) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, M0, 1) \
- { \
- LOOP_UNROLLING(int, _n0, 0, N0, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
a[_m0].s[_n0] = select((DATA_TYPE)(a[_m0].s[_n0]), (DATA_TYPE)(VALUE_TO_SET), (SELECT_DATA_TYPE(DATA_TYPE))(mask[_m0].v == (DATA_TYPE)0)); \
- } \
- } \
+ }) \
+ }) \
})
/** Element-wise activation
@@ -404,11 +540,11 @@
* @param[out] dst DST tile
*/
#define T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, src, dst) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, M0, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
dst[_m0].v = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, N0, src[_m0].v, A_VAL, B_VAL); \
- } \
+ }) \
})
/** Element-wise addition with a constant value
@@ -423,14 +559,14 @@
* @param[out] dst DST tile
*/
#define T_ADD_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, M0, 1) \
- { \
- LOOP_UNROLLING(int, _n0, 0, N0, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n0, 0, 1, N0, \
+ { \
dst[_m0].s[_n0] = lhs[_m0].s[_n0] + rhs_constant; \
- } \
- } \
+ }) \
+ }) \
})
/** Element-wise addition with RHS broadcasted (RHS has the X dimension only)
@@ -446,11 +582,11 @@
* @param[out] dst DST tile
*/
#define T_ADD_BROADCAST_X(DATA_TYPE, M0, N0, lhs, rhs, dst) \
- ({ \
- LOOP_UNROLLING(int, _m0, 0, M0, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _m0, 0, 1, M0, \
+ { \
dst[_m0].v = lhs[_m0].v + rhs[0].v; \
- } \
+ }) \
})
/** Matrix multiplication
@@ -478,24 +614,27 @@
#define T_MMUL_NT_T_uchar_uchar_int(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst)
#define T_MMUL_NT_T_FLOAT(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
{ \
- LOOP_UNROLLING(int, _m, 0, M0, 1) \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
{ \
- LOOP_UNROLLING(int, _n, 0, N0, 1) \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
{ \
- LOOP_UNROLLING(int, _k, 0, K0, 1) \
+ LOOP_UNROLLING(int, _k, 0, 1, K0, \
{ \
dst[_m].s[_n] = fma((lhs[_m].s[_k]), (rhs[_n].s[_k]), dst[_m].s[_n]); \
- } \
- } \
- } \
+ }) \
+ }) \
+ }) \
}
#define T_MMUL_NT_T_INTEGER8(DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \
- ({ \
- LOOP_UNROLLING(int, _m, 0, M0, 1) \
- { \
- LOOP_UNROLLING(int, _n, 0, N0, 1) \
- { \
+ ({ \
+ LOOP_UNROLLING(int, _m, 0, 1, M0, \
+ { \
+ LOOP_UNROLLING(int, _n, 0, 1, N0, \
+ { \
DOT_PRODUCT_INTEGER8(DST_DATA_TYPE, K0, (lhs[_m].v), (rhs[_n].v), dst[_m].s[_n]); \
- } \
- } \
- })
\ No newline at end of file
+ }) \
+ }) \
+ })
+
+// clang-format on
+// *INDENT-ON*
\ No newline at end of file
diff --git a/src/core/CL/cl_kernels/winograd_input_transform.cl b/src/core/CL/cl_kernels/winograd_input_transform.cl
index 932e1643fd..fbb5e95196 100644
--- a/src/core/CL/cl_kernels/winograd_input_transform.cl
+++ b/src/core/CL/cl_kernels/winograd_input_transform.cl
@@ -971,10 +971,10 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
TILE(DATA_TYPE, 6, 1, out);
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
in[i].v = 0;
- }
+ })
#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
T_LOAD_NHWC(DATA_TYPE, 1, 6, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
@@ -984,10 +984,10 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
TILE(DATA_TYPE, 6, 1, com);
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
in[i].v *= 4.0f;
- }
+ })
com[0].v = in[2].v - 4.f * in[0].v;
com[1].v = in[3].v - 4.f * in[1].v;
@@ -1006,11 +1006,11 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
TILE(uint, 6, 1, dst_indirect_y);
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
dst_indirect_y[i].v = mout + i * _INUM_TILES_X * _INUM_TILES_Y;
dst_indirect_y[i].v += bout * _INUM_TILES_X * _INUM_TILES_Y * 6;
- }
+ })
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 6, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
@@ -1019,10 +1019,10 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
TILE(DATA_TYPE, 36, 1, in);
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 36, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 36,
{
in[i].v = 0;
- }
+ })
// Load the tile from a NHWC tensor
T_LOAD_NHWC(DATA_TYPE, 6, 6, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
@@ -1030,7 +1030,7 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
TILE(DATA_TYPE, 6, 1, com);
TILE(DATA_TYPE, 36, 1, tmp);
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
com[0].v = in[2 * 6 + i].v - (DATA_TYPE)4.0f * in[0 * 6 + i].v;
com[1].v = in[3 * 6 + i].v - (DATA_TYPE)4.0f * in[1 * 6 + i].v;
@@ -1045,11 +1045,11 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
tmp[i + 3 * 6].v = com[5].v + com[4].v;
tmp[i + 4 * 6].v = com[5].v - com[4].v;
tmp[i + 5 * 6].v = com[3].v - com[1].v;
- }
+ })
TILE(DATA_TYPE, 36, 1, out);
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
com[0].v = tmp[i * 6 + 2].v - 4.f * tmp[i * 6 + 0].v;
com[1].v = tmp[i * 6 + 3].v - 4.f * tmp[i * 6 + 1].v;
@@ -1064,16 +1064,16 @@ __kernel void winograd_input_transform_4x4_3x3_stepz1_nhwc(
out[i * 6 + 3].v = com[5].v + com[4].v;
out[i * 6 + 4].v = com[5].v - com[4].v;
out[i * 6 + 5].v = com[3].v - com[1].v;
- }
+ })
// Compute destination address
TILE(uint, 36, 1, dst_indirect_y);
- LOOP_UNROLLING(int, i, 0, 36, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 36,
{
dst_indirect_y[i].v = mout + i * _INUM_TILES_X * _INUM_TILES_Y;
dst_indirect_y[i].v += bout * _INUM_TILES_X * _INUM_TILES_Y * 36;
- }
+ })
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 36, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL) || defined(WINOGRAD_INPUT_TRANSFORM_VERTICAL)
@@ -1141,10 +1141,10 @@ __kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
TILE(DATA_TYPE, 8, 1, out);
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
in[i].v = 0;
- }
+ })
#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
T_LOAD_NHWC(DATA_TYPE, 1, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
@@ -1171,11 +1171,11 @@ __kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
TILE(uint, 8, 1, dst_indirect_y);
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
dst_indirect_y[i].v = mout + i * _INUM_TILES_X * _INUM_TILES_Y;
dst_indirect_y[i].v += bout * _INUM_TILES_X * _INUM_TILES_Y * 8;
- }
+ })
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 8, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
@@ -1185,17 +1185,17 @@ __kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
TILE(DATA_TYPE, 64, 1, out);
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
in[i].v = 0;
- }
+ })
// Load the tile from a NHWC tensor
T_LOAD_NHWC(DATA_TYPE, 8, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
TILE(DATA_TYPE, 8, 8, com);
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
com[0].s[i] = in[2 * 8 + i].s[0] - (DATA_TYPE)4.25f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0]; // x
com[1].s[i] = in[1 * 8 + i].s[0] - (DATA_TYPE)4.25f * in[3 * 8 + i].s[0] + in[5 * 8 + i].s[0]; // x
@@ -1205,7 +1205,7 @@ __kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
com[5].s[i] = (DATA_TYPE)2.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)2.5f * in[3 * 8 + i].s[0] + (DATA_TYPE)0.5f * in[5 * 8 + i].s[0];
com[6].s[i] = in[0 * 8 + i].s[0] - (DATA_TYPE)5.25f * in[2 * 8 + i].s[0] + (DATA_TYPE)5.25f * in[4 * 8 + i].s[0] - in[6 * 8 + i].s[0];
com[7].s[i] = -in[1 * 8 + i].s[0] + (DATA_TYPE)5.25f * in[3 * 8 + i].s[0] - (DATA_TYPE)5.25f * in[5 * 8 + i].s[0] + in[7 * 8 + i].s[0];
- }
+ })
TILE(DATA_TYPE, 8, 8, tmp);
tmp[0].v = com[6].v;
@@ -1217,7 +1217,7 @@ __kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
tmp[6].v = com[4].v - com[5].v;
tmp[7].v = com[7].v;
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
com[0].s[0] = tmp[i].s[2] - 4.25f * tmp[i].s[4] + tmp[i].s[6];
com[0].s[1] = tmp[i].s[1] - 4.25f * tmp[i].s[3] + tmp[i].s[5];
@@ -1233,15 +1233,15 @@ __kernel void winograd_input_transform_4x4_5x5_stepz1_nhwc(
out[i * 8 + 5].s[0] = com[0].s[4] + com[0].s[5];
out[i * 8 + 6].s[0] = com[0].s[4] - com[0].s[5];
out[i * 8 + 7].s[0] = -tmp[i].s[1] + 5.25f * tmp[i].s[3] - 5.25f * tmp[i].s[5] + tmp[i].s[7];
- }
+ })
TILE(uint, 64, 1, dst_indirect_y);
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
dst_indirect_y[i].v = mout + i * _INUM_TILES_X * _INUM_TILES_Y;
dst_indirect_y[i].v += bout * _INUM_TILES_X * _INUM_TILES_Y * 64;
- }
+ })
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 64, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
@@ -1310,10 +1310,10 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
TILE(DATA_TYPE, 8, 1, out);
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
in[i].v = 0;
- }
+ })
#if defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
T_LOAD_NHWC(DATA_TYPE, 1, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
@@ -1321,10 +1321,10 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
T_LOAD_NHWC(DATA_TYPE, 8, 1, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
#endif // defined(WINOGRAD_INPUT_TRANSFORM_HORIZONTAL)
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
in[i].v *= (DATA_TYPE) - 36.0f;
- }
+ })
TILE(DATA_TYPE, 1, 8, com) = { { { 0 } } };
@@ -1345,11 +1345,11 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
TILE(uint, 8, 1, dst_indirect_y);
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
dst_indirect_y[i].v = mout + i * _INUM_TILES_X * _INUM_TILES_Y;
dst_indirect_y[i].v += bout * _INUM_TILES_X * _INUM_TILES_Y * 8;
- }
+ })
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 8, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
@@ -1359,17 +1359,17 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
TILE(DATA_TYPE, 64, 1, out);
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
in[i].v = 0;
- }
+ })
// Load the tile from a NHWC tensor
T_LOAD_NHWC(DATA_TYPE, 8, 8, 1, BUFFER, src, bout, y, x, cout, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, in);
TILE(DATA_TYPE, 8, 8, com);
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
com[0].s[i] = (DATA_TYPE)36.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)13.0f * in[4 * 8 + i].s[0] + in[6 * 8 + i].s[0];
com[1].s[i] = (DATA_TYPE)36.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)13.0f * in[3 * 8 + i].s[0] + in[5 * 8 + i].s[0];
@@ -1379,7 +1379,7 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
com[5].s[i] = (DATA_TYPE)12.0f * in[1 * 8 + i].s[0] - (DATA_TYPE)15.0f * in[3 * 8 + i].s[0] + (DATA_TYPE)3.0f * in[5 * 8 + i].s[0];
com[6].s[i] = (DATA_TYPE)49.0f * in[2 * 8 + i].s[0] - (DATA_TYPE)36.0f * in[0 * 8 + i].s[0] + in[6 * 8 + i].s[0] - (DATA_TYPE)14.0f * in[4 * 8 + i].s[0];
com[7].s[i] = (DATA_TYPE)49.0f * in[3 * 8 + i].s[0] - (DATA_TYPE)36.0f * in[1 * 8 + i].s[0] + in[7 * 8 + i].s[0] - (DATA_TYPE)14.0f * in[5 * 8 + i].s[0];
- }
+ })
TILE(DATA_TYPE, 8, 8, tmp);
tmp[0].v = com[6].v;
@@ -1391,7 +1391,7 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
tmp[6].v = com[4].v + com[5].v;
tmp[7].v = com[7].v;
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
com[0].s[0] = 36.0f * tmp[i].s[2] - 13.0f * tmp[i].s[4] + tmp[i].s[6];
com[0].s[1] = 36.0f * tmp[i].s[1] - 13.0f * tmp[i].s[3] + 1.0f * tmp[i].s[5];
@@ -1407,15 +1407,15 @@ __kernel void winograd_input_transform_2x2_7x7_stepz1_nhwc(
out[i * 8 + 5].s[0] = com[0].s[4] - com[0].s[5];
out[i * 8 + 6].s[0] = com[0].s[4] + com[0].s[5];
out[i * 8 + 7].s[0] = -36.0f * tmp[i].s[1] + 0.0f * tmp[i].s[2] + 49.0f * tmp[i].s[3] - 14.0f * tmp[i].s[5] + tmp[i].s[7];
- }
+ })
TILE(uint, 64, 1, dst_indirect_y);
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
dst_indirect_y[i].v = mout + i * _INUM_TILES_X * _INUM_TILES_Y;
dst_indirect_y[i].v += bout * _INUM_TILES_X * _INUM_TILES_Y * 64;
- }
+ })
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 64, 1, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
diff --git a/src/core/CL/cl_kernels/winograd_output_transform.cl b/src/core/CL/cl_kernels/winograd_output_transform.cl
index 9a5ca89a98..6a3e6d3346 100644
--- a/src/core/CL/cl_kernels/winograd_output_transform.cl
+++ b/src/core/CL/cl_kernels/winograd_output_transform.cl
@@ -237,17 +237,17 @@ __kernel void winograd_output_transform_2x2_7x7_nhwc(
TILE(uint, 8, 1, src_indirect_y);
// Calculate the indirect Y for the source tensor
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
src_indirect_y[i].v = mout + i * _ISRC_HEIGHT;
src_indirect_y[i].v += bout * (int)(_ISRC_HEIGHT * 8);
- }
+ })
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
in[i].v = 0;
- }
+ })
// Load the values across the 8 channels to compose the 8x1 tile
T_LOAD_INDIRECT(DATA_TYPE, 8, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
@@ -270,17 +270,17 @@ __kernel void winograd_output_transform_2x2_7x7_nhwc(
TILE(uint, 2, 1, dst_indirect_y);
#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- LOOP_UNROLLING(int, yk, 0, 2, 1)
+ LOOP_UNROLLING(int, yk, 0, 1, 2,
{
int y_c = min(y_out + yk, ((int)_IDST_HEIGHT - 1));
dst_indirect_y[yk].v = x_out + y_c * (int)(_IDST_WIDTH);
- }
+ })
#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- LOOP_UNROLLING(int, xk, 0, 2, 1)
+ LOOP_UNROLLING(int, xk, 0, 1, 2,
{
int x_c = min(x_out + xk, ((int)_IDST_WIDTH - 1));
dst_indirect_y[xk].v = x_c + y_out * (int)(_IDST_WIDTH);
- }
+ })
#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
// Store the tile in reverse order so the invalid values are overwritten with the valid ones
@@ -294,33 +294,33 @@ __kernel void winograd_output_transform_2x2_7x7_nhwc(
TILE(uint, 64, 1, src_indirect_y);
// Calculate the indirect Y for the source tensor
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
src_indirect_y[i].v = mout + i * _ISRC_HEIGHT;
src_indirect_y[i].v += bout * (int)(_ISRC_HEIGHT * 64);
- }
+ })
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
in[i].v = 0;
- }
+ })
// Load the values across the 64 channels to compose the 8x8 tile
T_LOAD_INDIRECT(DATA_TYPE, 64, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
tmp[i * 2].v = in[0 + i].v + in[8 + i].v + in[16 + i].v + in[24 + i].v + in[32 + i].v + in[40 + i].v + in[48 + i].v;
tmp[i * 2 + 1].v = -in[8 + i].v + in[16 + i].v - 2 * in[24 + i].v + 2 * in[32 + i].v + -3 * in[40 + i].v + 3 * in[48 + i].v + in[56 + i].v;
- }
+ })
// Compute the 2x2 output tile
- LOOP_UNROLLING(int, i, 0, 2, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 2,
{
out[i * 2].v = tmp[0 + i].v + tmp[2 + i].v + tmp[4 + i].v + tmp[6 + i].v + tmp[8 + i].v + tmp[10 + i].v + tmp[12 + i].v;
out[i * 2 + 1].v = -tmp[2 + i].v + tmp[4 + i].v - 2 * tmp[6 + i].v + 2 * tmp[8 + i].v - 3 * tmp[10 + i].v + 3 * tmp[12 + i].v + tmp[14 + i].v;
- }
+ })
#if defined(HAS_BIAS)
// Add bias
@@ -336,16 +336,16 @@ __kernel void winograd_output_transform_2x2_7x7_nhwc(
TILE(uint, 4, 1, dst_indirect_y);
// Calculate the destination indirect Y
- LOOP_UNROLLING(int, yk, 0, 2, 1)
+ LOOP_UNROLLING(int, yk, 0, 1, 2,
{
- LOOP_UNROLLING(int, xk, 0, 2, 1)
+ LOOP_UNROLLING(int, xk, 0, 1, 2,
{
int x_c = min(x_out + xk, ((int)_IDST_WIDTH - 1));
int y_c = min(y_out + yk, ((int)_IDST_HEIGHT - 1));
dst_indirect_y[xk + yk * 2].v = x_c + y_c * _IDST_WIDTH;
dst_indirect_y[xk + yk * 2].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
- }
- }
+ })
+ })
// Store the tile in reverse order so the invalid values are overwritten with the valid ones
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 4, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
@@ -630,17 +630,17 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
TILE(DATA_TYPE, 4, N0, out);
TILE(uint, 6, 1, src_indirect_y);
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
src_indirect_y[i].v = mout + i * SRC_HEIGHT;
src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 6);
- }
+ })
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
in[i].v = 0;
- }
+ })
// Load the values across the 36 channels to compose the 6x6 or 6x1 tile
T_LOAD_INDIRECT(DATA_TYPE, 6, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
@@ -669,19 +669,19 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
// Calculate the destination indirect Y
#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- LOOP_UNROLLING(int, yk, 0, 4, 1)
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
{
int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
dst_indirect_y[yk].v = x_out + y_c * DST_WIDTH;
dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
- }
+ })
#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- LOOP_UNROLLING(int, xk, 0, 4, 1)
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
{
int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
dst_indirect_y[xk].v = x_c + y_out * DST_WIDTH;
dst_indirect_y[xk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
- }
+ })
#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
// Store the tile in reverse order so the invalid values are overwritten with the valid ones
@@ -694,22 +694,22 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
TILE(DATA_TYPE, 4, N0, tmp);
TILE(uint, 36, 1, src_indirect_y);
- LOOP_UNROLLING(int, i, 0, 36, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 36,
{
src_indirect_y[i].v = mout + i * SRC_HEIGHT;
src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 36);
- }
+ })
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 36, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 36,
{
in[i].v = 0;
- }
+ })
// Load the values across the 36 channels to compose the 6x6 or 6x1 tile
T_LOAD_INDIRECT(DATA_TYPE, 36, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
- LOOP_UNROLLING(int, i, 0, 6, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 6,
{
tmp[0].v = in[6 + i].v + in[12 + i].v;
tmp[1].v = in[6 + i].v - in[12 + i].v;
@@ -720,12 +720,12 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
in[6 + i].v = tmp[3].v + tmp[1].v;
in[12 + i].v = fma(tmp[2].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[0].v);
in[18 + i].v = fma(tmp[3].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[1].v) + in[30 + i].v;
- }
+ })
// Compute the output tile
TILE(DATA_TYPE, 16, N0, out);
- LOOP_UNROLLING(int, i, 0, 4, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 4,
{
tmp[0].v = in[6 * i + 1].v + in[6 * i + 2].v;
tmp[1].v = in[6 * i + 1].v - in[6 * i + 2].v;
@@ -736,7 +736,7 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
out[4 * i + 1].v = tmp[3].v + tmp[1].v;
out[4 * i + 2].v = fma(tmp[2].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[0].v);
out[4 * i + 3].v = fma(tmp[3].v, (VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[1].v) + in[6 * i + 5].v;
- }
+ })
#if defined(HAS_BIAS)
TILE(DATA_TYPE, 1, N0, b);
@@ -755,16 +755,16 @@ __kernel void winograd_output_transform_4x4_3x3_nhwc(
TILE(uint, 16, 1, dst_indirect_y);
// Calculate the destination indirect Y
- LOOP_UNROLLING(int, yk, 0, 4, 1)
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
{
- LOOP_UNROLLING(int, xk, 0, 4, 1)
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
{
int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
dst_indirect_y[xk + yk * 4].v = x_c + y_c * DST_WIDTH;
dst_indirect_y[xk + yk * 4].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
- }
- }
+ })
+ })
// Store the tile in reverse order so the invalid values are overwritten with the valid ones
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 16, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);
@@ -1076,17 +1076,17 @@ __kernel void winograd_output_transform_4x4_5x5_nhwc(
TILE(DATA_TYPE, 4, N0, tmp);
TILE(uint, 8, 1, src_indirect_y);
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
src_indirect_y[i].v = mout + i * SRC_HEIGHT;
src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 8);
- }
+ })
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
in[i].v = 0;
- }
+ })
// "in" contains 1x8 or 8x1 tile here
T_LOAD_INDIRECT(DATA_TYPE, 8, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
@@ -1119,19 +1119,19 @@ __kernel void winograd_output_transform_4x4_5x5_nhwc(
// Calculate the destination indirect Y
#if defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- LOOP_UNROLLING(int, yk, 0, 4, 1)
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
{
int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
dst_indirect_y[yk].v = x_out + y_c * DST_WIDTH;
dst_indirect_y[yk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
- }
+ })
#else // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
- LOOP_UNROLLING(int, xk, 0, 4, 1)
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
{
int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
dst_indirect_y[xk].v = x_c + y_out * DST_WIDTH;
dst_indirect_y[xk].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
- }
+ })
#endif // defined(WINOGRAD_OUTPUT_TRANSFORM_VERTICAL)
// Store the tile in reverse order so the invalid values are overwritten with the valid ones
@@ -1143,23 +1143,23 @@ __kernel void winograd_output_transform_4x4_5x5_nhwc(
TILE(DATA_TYPE, 6, N0, tmp);
TILE(uint, 64, 1, src_indirect_y);
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
src_indirect_y[i].v = mout + i * SRC_HEIGHT;
src_indirect_y[i].v += bout * (int)(SRC_HEIGHT * 64);
- }
+ })
// Initialize the input tile
- LOOP_UNROLLING(int, i, 0, 64, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 64,
{
in[i].v = 0;
- }
+ })
// "in" here is 8x8 tile
T_LOAD_INDIRECT(DATA_TYPE, 64, N0, BUFFER, src, cout, src_stride_y, src_indirect_y, in);
// A^T * in
- LOOP_UNROLLING(int, i, 0, 8, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 8,
{
tmp[0].v = in[8 + i].v + in[16 + i].v;
tmp[1].v = in[8 + i].v - in[16 + i].v;
@@ -1175,13 +1175,13 @@ __kernel void winograd_output_transform_4x4_5x5_nhwc(
in[8 + i].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[5].v, tmp[3].v);
in[16 + i].v = tmp[0].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[2].v, tmp[4].v);
in[24 + i].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[3].v, tmp[5].v) + in[56 + i].v;
- }
+ })
// Compute the output tile
TILE(DATA_TYPE, 16, N0, out);
// in * A, with in = A^T * in as above
- LOOP_UNROLLING(int, i, 0, 4, 1)
+ LOOP_UNROLLING(int, i, 0, 1, 4,
{
tmp[0].v = in[8 * i + 1].v + in[8 * i + 2].v;
tmp[1].v = in[8 * i + 1].v - in[8 * i + 2].v;
@@ -1197,7 +1197,7 @@ __kernel void winograd_output_transform_4x4_5x5_nhwc(
out[4 * i + 1].v = tmp[1].v + fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[5].v, tmp[3].v);
out[4 * i + 2].v = fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[2].v, tmp[0].v) + tmp[4].v;
out[4 * i + 3].v = fma((VEC_DATA_TYPE(DATA_TYPE, N0))4.0f, tmp[3].v, tmp[1].v) + tmp[5].v + in[8 * i + 7].v;
- }
+ })
#if defined(HAS_BIAS)
TILE(DATA_TYPE, 1, N0, b);
@@ -1216,16 +1216,16 @@ __kernel void winograd_output_transform_4x4_5x5_nhwc(
TILE(uint, 16, 1, dst_indirect_y);
// Calculate the destination indirect Y
- LOOP_UNROLLING(int, yk, 0, 4, 1)
+ LOOP_UNROLLING(int, yk, 0, 1, 4,
{
- LOOP_UNROLLING(int, xk, 0, 4, 1)
+ LOOP_UNROLLING(int, xk, 0, 1, 4,
{
int x_c = min(x_out + xk, ((int)DST_WIDTH - 1));
int y_c = min(y_out + yk, ((int)DST_HEIGHT - 1));
dst_indirect_y[xk + yk * 4].v = x_c + y_c * DST_WIDTH;
dst_indirect_y[xk + yk * 4].v += bout * (int)(DST_WIDTH * DST_HEIGHT);
- }
- }
+ })
+ })
// Store the tile in reverse order so the invalid values are overwritten with the valid ones
T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, 16, N0, 0, BUFFER, dst, cout, dst_stride_y, false, out, dst_indirect_y);