author    Gian Marco Iodice <gianmarco.iodice@arm.com>  2022-11-17 11:03:39 +0000
committer Gian Marco Iodice <gianmarco.iodice@arm.com>  2022-12-09 11:06:23 +0000
commit    76335eb8d8733b0bbc0110546797211540870c50 (patch)
tree      812fc44de593c9e1e45ac8b534094511b06163bf /src/core/CL/cl_kernels
parent    f16973b8b4605f12608bffa9f0ca6ed590202d41 (diff)
download  ComputeLibrary-76335eb8d8733b0bbc0110546797211540870c50.tar.gz
Implement the OpenCL kernel to compute the indirect convolution
- Implement indirect convolution kernel
- Add operator support
- Add test

Resolves COMPMID-5709

Change-Id: I9272304163471a5a40da7fdec204599f3c1d8e32
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8701
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/CL/cl_kernels')
-rw-r--r--  src/core/CL/cl_kernels/nhwc/direct_convolution.cl     |  37
-rw-r--r--  src/core/CL/cl_kernels/nhwc/indirect_convolution.cl   | 224
-rw-r--r--  src/core/CL/cl_kernels/nhwc/transposed_convolution.cl |  43
-rw-r--r--  src/core/CL/cl_kernels/tile_helpers.h                 |  20
4 files changed, 277 insertions, 47 deletions
diff --git a/src/core/CL/cl_kernels/nhwc/direct_convolution.cl b/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
index 2e7ed5a4ca..8be8e00f0a 100644
--- a/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
+++ b/src/core/CL/cl_kernels/nhwc/direct_convolution.cl
@@ -53,7 +53,7 @@
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
* @note The zero value must be passed at compile time using -DZERO_VALUE (e.g. -DZERO_VALUE=0)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
- * - M0 = 1, 2, 3, 4, 5, .... n
+ * - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
*
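For context, one combination of compile-time options that satisfies the constraints listed in the hunk above (a sketch only; the macro names come from the @note list, the values are illustrative, and the real strings are assembled by the host-side operator):

    // Illustrative build options for direct_convolution_nhwc:
    // M0 in [1..8], N0 in {2,3,4,8,16}, K0 in {2,3,4,8,16}.
    -DM0=4 -DN0=4 -DK0=4 -DPARTIAL_N0=1 -DZERO_VALUE=0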
@@ -137,16 +137,16 @@ __kernel void direct_convolution_nhwc(
// .v = access the whole vector (OpenCL vector)
// .s[x] = access the vector element at position x (scalar access)
- TILE(int, M0, 1, xi);
- TILE(int, M0, 1, yi);
+ TILE(int, 1, M0, xi);
+ TILE(int, 1, M0, yi);
// Convert the linear index to coordinate
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- xi[i].v = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
- yi[i].v = ((mout + i) / _IDST_WIDTH) * STRIDE_Y;
- xi[i].v -= PAD_LEFT;
- yi[i].v -= PAD_TOP;
+ xi[0].s[i] = ((mout + i) % _IDST_WIDTH) * STRIDE_X;
+ yi[0].s[i] = ((mout + i) / _IDST_WIDTH) * STRIDE_Y;
+ xi[0].s[i] -= PAD_LEFT;
+ yi[0].s[i] -= PAD_TOP;
})
// Initialize the accumulators
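The hunk above switches the coordinate tiles from a column layout to a row layout. A minimal sketch of the difference, assuming TILE(type, ROWS, COLS, name) declares name[ROWS] with each row a COLS-wide vector (consistent with the .v/.s[x] access comment above):

    // Sketch of the layout change; TILE semantics inferred from the
    // .v / .s[x] comment in the kernel above.
    TILE(int, M0, 1, xi); // before: M0 rows of width 1 -> read as xi[i].v
    TILE(int, 1, M0, xi); // after:  1 row of width M0  -> read as xi[0].s[i]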
@@ -162,18 +162,18 @@ __kernel void direct_convolution_nhwc(
int xk = i % _IWEI_WIDTH;
int yk = i / _IWEI_WIDTH;
- TILE(int, M0, 1, my);
+ TILE(int, 1, M0, my);
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- int x_s = xi[i].v + xk;
- int y_s = yi[i].v + yk;
- my[i].v = x_s + y_s *_ISRC_WIDTH;
- my[i].v = my[i].v + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
- my[i].v = select(-1, my[i].v, x_s >= 0);
- my[i].v = select(-1, my[i].v, x_s < _ISRC_WIDTH);
- my[i].v = select(-1, my[i].v, y_s >= 0);
- my[i].v = select(-1, my[i].v, y_s < _ISRC_HEIGHT);
+ int x_s = xi[0].s[i] + xk;
+ int y_s = yi[0].s[i] + yk;
+ my[0].s[i] = x_s + y_s *_ISRC_WIDTH;
+ my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
+ my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
+ my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
})
int ck = 0;
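The my tile built above packs, for each of the M0 output positions, a flattened row index into the height*width plane of the NHWC source; any out-of-bounds coordinate is forced to -1 so the indirect load can skip it. A worked check with illustrative values:

    // Worked check (illustrative values): _ISRC_WIDTH = 8, bout = 0.
    // Out of bounds (x_s = -1, left padding): every select() keeps -1.
    //   my[0].s[i] = -1              -> row skipped by the indirect load
    // In bounds (x_s = 3, y_s = 2): all four bounds checks pass.
    //   my[0].s[i] = 3 + 2 * 8 = 19  -> row 19 of the W*H plane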
@@ -189,7 +189,7 @@ __kernel void direct_convolution_nhwc(
})
// Load tile from the src tensor
- T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, my, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
@@ -202,7 +202,6 @@ __kernel void direct_convolution_nhwc(
T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, a, b, c);
}
- // We voluntarily use SRC_CHANNELS rather than _DSRC_CHANNELS
// This #if directive should be removed in case of dynamic tensor support
#if defined(LEFTOVER_LOOP)
// Left-over accumulations
@@ -223,7 +222,7 @@ __kernel void direct_convolution_nhwc(
})
// Load tile from the src tensor
- T_LOAD_NHWC_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, xi, yi, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
diff --git a/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
index 07c7212e77..c88f0034c5 100644
--- a/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
+++ b/src/core/CL/cl_kernels/nhwc/indirect_convolution.cl
@@ -21,13 +21,16 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
+#include "activation_float_helpers.h"
#include "helpers.h"
#include "tile_helpers.h"
+#if defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
//! @cond Doxygen_Suppress
/** OpenCL kernel to compute the indirect convolution 2d indirect buffer.
*
+ * @note This kernel only supports a unit batch size (batch_size = 1)
+ *
* @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
* @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
* @note The kernel width must be passed at compile time using -DWEI_CONV_WIDTH (e.g. -DWEI_CONV_WIDTH=9)
@@ -38,7 +41,7 @@
* @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* - M0 = 1, 2, 3, 4, 5, 6, 7, and 8
*
- * @param[out] dst_img CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
+ * @param[out] dst_img (Not supported) CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
* @param[out] dst_ptr Pointer to the destination tensor. Supported data type: INT32
* @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
@@ -90,4 +93,219 @@ __kernel void indirect_convolution_address_precalculation(
VSTORE(1)
(my[0].s[0], 0, (__global DST_DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(DST_DATA_TYPE) + y * dst_stride_y + z * dst_stride_z));
-}
\ No newline at end of file
+}
+#endif // defined(INDIRECT_CONVOLUTION_ADDRESS_PRECALCULATION)
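The buffer produced by this precalculation kernel is what indirect_convolution_nhwc below consumes. Its layout can be read off the T_LOAD at offset i * M0 in the main loop below (a sketch, not normative):

    // Assumed indirect-buffer layout, for kernel point i and output-row
    // block element m in [0, M0):
    //   off[i * M0 + m] = flattened source row index, or -1 for padding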
+
+#if defined(INDIRECT_CONVOLUTION_NHWC)
+//! @cond Doxygen_Suppress
+/** OpenCL kernel to compute the indirect convolution.
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the source tensor must be passed at compile time using -DSRC_TENSOR_TYPE (e.g. -DSRC_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the weights tensor must be passed at compile time using -DWEI_TENSOR_TYPE (e.g. -DWEI_TENSOR_TYPE=BUFFER)
+ * @note The tensor type ("BUFFER" or "IMAGE") of the destination tensor must be passed at compile time using -DDST_TENSOR_TYPE (e.g. -DDST_TENSOR_TYPE=BUFFER)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_N0 (e.g. -DPARTIAL_N0=1)
+ * @note The vector length used for loading the values from the indirect buffer should be passed at compile time using -DIND_BUFF_VEC_SIZE (e.g. -DIND_BUFF_VEC_SIZE=4)
+ * @note The activation function to fuse and corresponding A and B values should be passed at compile time using -DACTIVATION_TYPE, -DA_VAL, and -DB_VAL
+ * (e.g. -DACTIVATION_TYPE=lu_brelu_op, -DA_VAL=3.0, and -DB_VAL=1.0)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1, 2, 3, 4, 5, 6, and 8
+ * - N0 = 2, 3, 4, 8, 16
+ * - K0 = 2, 3, 4, 8, 16 (only 4, 8 and 16 if WEI_TENSOR_TYPE=IMAGE)
+ *
+ * @param[in] src_img (Not supported) CLImage object to the source tensor (SRC_TENSOR_TYPE=IMAGE only)
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_c The size of the channels dimension of the source tensor
+ * @param[in] src_w The size of the width dimension of the source tensor
+ * @param[in] src_h The size of the height dimension of the source tensor
+ * @param[in] src_n The size of the batches dimension of the source tensor
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] off_img (Not supported) CLImage object to the indirect buffer tensor (OFF_TENSOR_TYPE=IMAGE only)
+ * @param[in] off_ptr Pointer to the indirect buffer tensor. Supported data type: INT32
+ * @param[in] off_stride_y Stride of the indirect buffer tensor in Y dimension (in bytes)
+ * @param[in] off_stride_z Stride of the indirect buffer tensor in Z dimension (in bytes)
+ * @param[in] off_stride_w Stride of the indirect buffer tensor in W dimension (in bytes)
+ * @param[in] off_c The size of the channels dimension of the indirect buffer tensor
+ * @param[in] off_w The size of the width dimension of the indirect buffer tensor
+ * @param[in] off_h The size of the height dimension of the indirect buffer tensor
+ * @param[in] off_n The size of the batches dimension of the indirect buffer tensor
+ * @param[in] off_offset_first_element_in_bytes The offset of the first element in the indirect buffer tensor
+ * @param[out] dst_img (Not supported) CLImage object to the destination tensor (DST_TENSOR_TYPE=IMAGE only)
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as the input tensor
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_c The size of the channels dimension of the destination tensor
+ * @param[in] dst_w The size of the width dimension of the destination tensor
+ * @param[in] dst_h The size of the height dimension of the destination tensor
+ * @param[in] dst_n The size of the batches dimension of the destination tensor
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_img (Optional) CLImage object to the weights tensor (WEI_TENSOR_TYPE=IMAGE only)
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as the input tensor
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ * @param[in] wei_c The size of the channels dimension of the weights tensor
+ * @param[in] wei_w The size of the width dimension of the weights tensor
+ * @param[in] wei_h The size of the height dimension of the weights tensor
+ * @param[in] wei_n The size of the batches dimension of the weights tensor
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the weights tensor
+ * @param[in] bia_img (Not supported) CLImage object to the bias tensor (BIA_TENSOR_TYPE=IMAGE only)
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor. Supported data type: same as the input tensor
+ * @param[in] bia_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
+ * @param[in] bia_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
+ * @param[in] bia_stride_w (Optional) Stride of the bias tensor in W dimension (in bytes)
+ * @param[in] bia_c (Optional) The size of the channels dimension of the bias tensor
+ * @param[in] bia_w (Optional) The size of the width dimension of the bias tensor
+ * @param[in] bia_h (Optional) The size of the height dimension of the bias tensor
+ * @param[in] bia_n (Optional) The size of the batches dimension of the bias tensor
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
+ */
+//! @endcond
+__kernel void indirect_convolution_nhwc(
+ TENSOR4D_T(src, SRC_TENSOR_TYPE),
+ TENSOR4D_T(off, OFF_TENSOR_TYPE),
+ TENSOR4D_T(dst, DST_TENSOR_TYPE),
+ TENSOR4D_T(wei, WEI_TENSOR_TYPE)
+#if defined(HAS_BIAS)
+ ,
+ VECTOR_DECLARATION(bia)
+#endif // defined(HAS_BIAS)
+)
+{
+ // All the tensor dimensions are passed at compile time.
+ // In case of dynamic tensor support, the following dimensions should be passed as function argument.
+#define _IWEI_WIDTH WEI_WIDTH
+#define _IWEI_HEIGHT WEI_HEIGHT
+#define _ISRC_CHANNELS SRC_CHANNELS
+#define _IDST_WIDTH DST_WIDTH
+#define _IDST_HEIGHT DST_HEIGHT
+#define _IY_MULTIPLIER (_IWEI_WIDTH * _IWEI_HEIGHT)
+
+ const int cout = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // OFM
+ const int mout = GET_SPATIAL_IDX(1, M0, 0); // WIDTH x HEIGHT
+ const int bout = GET_SPATIAL_IDX(2, 1, 0); // BATCH SIZE IDX
+
+ off_offset_first_element_in_bytes += get_global_id(1) * off_stride_y;
+ off_offset_first_element_in_bytes += bout * off_stride_z;
+
+ // Initialize the accumulators
+ TILE(DST_DATA_TYPE, M0, N0, c);
+
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ c[i].v = 0;
+ })
+
+ for(int i = 0; i < (_IWEI_WIDTH * _IWEI_HEIGHT); ++i)
+ {
+ TILE(int, 1, IND_BUFF_VEC_SIZE, my);
+ T_LOAD(int, 1, IND_BUFF_VEC_SIZE, OFF_TENSOR_TYPE, off, i * M0, 0, 1, 0, my);
+
+ int ck = 0;
+ for(; ck <= (_ISRC_CHANNELS - K0); ck += K0)
+ {
+ TILE(SRC_DATA_TYPE, M0, K0, a);
+ TILE(WEI_DATA_TYPE, N0, K0, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.0;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
+ }
+
+ // This #if directive should be removed in case of dynamic tensor support
+#if defined(LEFTOVER_LOOP)
+ // Left-over accumulations
+ for(; ck < _ISRC_CHANNELS; ++ck)
+ {
+ TILE(SRC_DATA_TYPE, M0, 1, a);
+ TILE(WEI_DATA_TYPE, N0, 1, b);
+
+ // Initialize tiles
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ a[i].v = 0.0;
+ })
+
+ LOOP_UNROLLING(int, i, 0, 1, N0,
+ {
+ b[i].v = 0.0;
+ })
+
+ // Load tile from the src tensor
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
+
+ // Load tile from the weights tensor
+ // The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
+ T_LOAD(WEI_DATA_TYPE, N0, 1, BUFFER, wei, ck, cout * _IY_MULTIPLIER + i, _IY_MULTIPLIER, wei_stride_y, b);
+
+ // Compute the matrix multiplication between two tiles
+ T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, DST_DATA_TYPE, M0, N0, 1, NT, T, a, b, c);
+ }
+#endif // defined(LEFTOVER_LOOP)
+ }
+
+#if defined(HAS_BIAS)
+ TILE(BIA_DATA_TYPE, 1, N0, bias0);
+
+ T_LOAD(BIA_DATA_TYPE, 1, N0, BUFFER, bia, cout, 0, 1, 0, bias0);
+
+ // c = c + bias[broadcasted]
+ T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, c, bias0, c);
+
+#endif // HAS_BIAS
+
+ // Apply activation
+ T_ACTIVATION(DST_DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, c, c);
+
+ TILE(uint, M0, 1, dst_indirect_y);
+
+ // Calculate the destination indirect Y
+ LOOP_UNROLLING(int, i, 0, 1, M0,
+ {
+ dst_indirect_y[i].v = (uint)min(mout + i, (int)(_IDST_WIDTH * _IDST_HEIGHT) - 1);
+ dst_indirect_y[i].v += bout * (int)(_IDST_WIDTH * _IDST_HEIGHT);
+ })
+
+ const bool x_cond = PARTIAL_N0 != 0 && get_global_id(0) == 0;
+
+ // Store the tile in reverse order so the invalid values are overwritten with the valid ones
+ T_STORE_INDIRECT_WIDTH_SELECT(DST_DATA_TYPE, M0, N0, PARTIAL_N0, DST_TENSOR_TYPE, dst, cout, dst_stride_y, x_cond, c, dst_indirect_y);
+
+#undef _IWEI_WIDTH
+#undef _IWEI_HEIGHT
+#undef _ISRC_CHANNELS
+#undef _IDST_WIDTH
+#undef _IDST_HEIGHT
+#undef _IY_MULTIPLIER
+}
+#endif // defined(INDIRECT_CONVOLUTION_NHWC)
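A minimal, illustrative set of build options for the kernel above (macro names are taken from the #if guards, the kernel signature, and the @note list; the values are examples only, and the real strings are assembled by the host-side operator):

    // Illustrative only: F32, 3x3 weights, BUFFER tensors, fused bounded ReLU.
    -DINDIRECT_CONVOLUTION_NHWC
    -DSRC_DATA_TYPE=float -DWEI_DATA_TYPE=float -DDST_DATA_TYPE=float
    -DSRC_TENSOR_TYPE=BUFFER -DOFF_TENSOR_TYPE=BUFFER
    -DWEI_TENSOR_TYPE=BUFFER -DDST_TENSOR_TYPE=BUFFER
    -DWEI_WIDTH=3 -DWEI_HEIGHT=3 -DSRC_CHANNELS=64
    -DDST_WIDTH=96 -DDST_HEIGHT=64
    -DM0=4 -DN0=4 -DK0=4 -DPARTIAL_N0=1 -DIND_BUFF_VEC_SIZE=4
    -DACTIVATION_TYPE=lu_brelu_op -DA_VAL=3.0 -DB_VAL=1.0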
diff --git a/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl b/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
index 8872c31229..c01a44f117 100644
--- a/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
+++ b/src/core/CL/cl_kernels/nhwc/transposed_convolution.cl
@@ -114,18 +114,18 @@ __kernel void transposed_convolution_nhwc(
// .v = access the whole vector (OpenCL vector)
// .s[x] = access the vector element at position x (scalar access)
- TILE(int, M0, 1, xi);
- TILE(int, M0, 1, yi);
- TILE(int, M0, 1, xu);
- TILE(int, M0, 1, yu);
+ TILE(int, 1, M0, xi);
+ TILE(int, 1, M0, yi);
+ TILE(int, 1, M0, xu);
+ TILE(int, 1, M0, yu);
// Convert the linear index to coordinate
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- xu[i].v = ((mout + i) % _IDST_WIDTH) - PAD_LEFT;
- yu[i].v = ((mout + i) / _IDST_WIDTH) - PAD_TOP;
- xi[i].v = ceil(xu[i].v / (float)STRIDE_X);
- yi[i].v = ceil(yu[i].v / (float)STRIDE_Y);
+ xu[0].s[i] = ((mout + i) % _IDST_WIDTH) - PAD_LEFT;
+ yu[0].s[i] = ((mout + i) / _IDST_WIDTH) - PAD_TOP;
+ xi[0].s[i] = ceil(xu[0].s[i] / (float)STRIDE_X);
+ yi[0].s[i] = ceil(yu[0].s[i] / (float)STRIDE_Y);
})
// Initialize the accumulators
@@ -137,8 +137,8 @@ __kernel void transposed_convolution_nhwc(
})
// Flipped indices
- const int x_start = _IWEI_WIDTH - (xi[0].v * STRIDE_X - xu[0].v) - 1;
- const int y_start = _IWEI_HEIGHT - (yi[0].v * STRIDE_Y - yu[0].v) - 1;
+ const int x_start = _IWEI_WIDTH - (xi[0].s[0] * STRIDE_X - xu[0].s[0]) - 1;
+ const int y_start = _IWEI_HEIGHT - (yi[0].s[0] * STRIDE_Y - yu[0].s[0]) - 1;
for(int yk = y_start, yi_step = 0; yk >= 0; yk -= STRIDE_Y, ++yi_step)
{
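As a worked check of the flipped-index math in the hunk above (illustrative values: STRIDE_X = 2, PAD_LEFT = 1, _IWEI_WIDTH = 3, first output column so mout % _IDST_WIDTH = 0):

    // xu      = 0 - 1                  = -1  (upsampled-space coordinate)
    // xi      = ceil(-1 / 2.0f)        =  0  (first contributing input column)
    // x_start = 3 - (0 * 2 - (-1)) - 1 =  1  (first flipped weight tap)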
@@ -146,18 +146,18 @@ __kernel void transposed_convolution_nhwc(
{
int weights_y = cout * _IY_MULTIPLIER + yk * _IWEI_WIDTH + xk;
- TILE(int, M0, 1, my);
+ TILE(int, 1, M0, my);
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- int x_s = xi[i].v + xi_step;
- int y_s = yi[i].v + yi_step;
- my[i].v = x_s + y_s *_ISRC_WIDTH;
- my[i].v = my[i].v + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
- my[i].v = select(-1, my[i].v, x_s >= 0);
- my[i].v = select(-1, my[i].v, x_s < _ISRC_WIDTH);
- my[i].v = select(-1, my[i].v, y_s >= 0);
- my[i].v = select(-1, my[i].v, y_s < _ISRC_HEIGHT);
+ int x_s = xi[0].s[i] + xi_step;
+ int y_s = yi[0].s[i] + yi_step;
+ my[0].s[i] = x_s + y_s *_ISRC_WIDTH;
+ my[0].s[i] = my[0].s[i] + bout * (int)(_ISRC_WIDTH * _ISRC_HEIGHT);
+ my[0].s[i] = select(-1, my[0].s[i], x_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], x_s < _ISRC_WIDTH);
+ my[0].s[i] = select(-1, my[0].s[i], y_s >= 0);
+ my[0].s[i] = select(-1, my[0].s[i], y_s < _ISRC_HEIGHT);
})
int ck = 0;
@@ -178,7 +178,7 @@ __kernel void transposed_convolution_nhwc(
})
// Load tile from the src tensor
- T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, my, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
T_LOAD(WEI_DATA_TYPE, N0, K0, WEI_TENSOR_TYPE, wei, ck, weights_y, _IY_MULTIPLIER, wei_stride_y, b);
@@ -187,7 +187,6 @@ __kernel void transposed_convolution_nhwc(
T_MMUL(SRC_DATA_TYPE, WEI_DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c);
}
- // We voluntarily use SRC_CHANNELS rather than _DSRC_CHANNELS
// This #if directive should be removed in case of dynamic tensor support
#if defined(LEFTOVER_LOOP)
// Left-over accumulations
@@ -204,7 +203,7 @@ __kernel void transposed_convolution_nhwc(
// Load tile from the src tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
- T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, BUFFER, src, bout, yk, xk, ck, _ISRC_WIDTH, _ISRC_HEIGHT, src_stride_y, my, a);
+ T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, 1, BUFFER, src, ck, src_stride_y, my, a);
// Load tile from the weights tensor
// The T_LOAD for the left-over elements can only use BUFFER because we load one element per iteration
diff --git a/src/core/CL/cl_kernels/tile_helpers.h b/src/core/CL/cl_kernels/tile_helpers.h
index 998bc9efb2..861ea63eca 100644
--- a/src/core/CL/cl_kernels/tile_helpers.h
+++ b/src/core/CL/cl_kernels/tile_helpers.h
@@ -653,13 +653,27 @@
}) \
})
-#define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, yi, dst) \
+/** Load a tile from global memory (tensor) using an indirect buffer for the Y coordinates
+ *
+ * @param[in] DATA_TYPE Data type
+ * @param[in] TILE_AREA Number of elements to load from Y (height) dimension * Number of elements to load from X (width) dimension
+ * @param[in] TILE_CHANNELS Number of elements to load from C (channel) dimension
+ * @param[in] TENSOR_TYPE Type of cl_type used to store the tensor in global memory (BUFFER=cl_buffer, IMAGE=cl_image). Currently only BUFFER is supported
+ * In case of cl_image, only TILE_CHANNELS multiples of 4 are supported (4, 8, 16)
+ * @param[in] TENSOR Tensor basename
+ * @param[in] C Starting C index
+ * @param[in] STRIDE_Y Stride Y (in bytes)
+ * @param[in] yi A tile with TILE_AREA values holding the indirect Y coordinates; a negative value (-1) skips the load for that row.
+ * 16 is the maximum indirect buffer size.
+ * @param[out] dst Output tile
+ */
+#define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \
({ \
LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \
{ \
- if(yi[_i].v >= 0) \
+ if(yi[0].s[_i] >= 0) \
{ \
- dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[_i].v, STRIDE_Y); \
+ dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \
} \
}) \
})
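A minimal usage sketch of the new, narrower signature, mirroring the call sites in the kernels above (identifiers such as ck and my are the ones used there):

    // my holds M0 precomputed flattened row indices (-1 = skip, e.g. padding);
    // the load reads K0 channels per row starting at channel ck.
    TILE(int, 1, M0, my);            // indirect Y coordinates
    TILE(SRC_DATA_TYPE, M0, K0, a);  // destination tile
    T_LOAD2D_INDIRECT(SRC_DATA_TYPE, M0, K0, SRC_TENSOR_TYPE, src, ck, src_stride_y, my, a);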