aboutsummaryrefslogtreecommitdiff
path: root/src/core/CL/cl_kernels
diff options
context:
space:
mode:
authorSheri Zhang <sheri.zhang@arm.com>2020-10-27 10:19:41 +0000
committerSheri Zhang <sheri.zhang@arm.com>2020-10-29 10:48:51 +0000
commit7292362dce62b3f39d6c35e9601b5c12ab770a3f (patch)
treef9685c715a9e442998099f550889ab4b6bd594fe /src/core/CL/cl_kernels
parent5a4284dc7d98a382d0fa492b64fabe430d5afdc6 (diff)
downloadComputeLibrary-7292362dce62b3f39d6c35e9601b5c12ab770a3f.tar.gz
COMPMID-3737: Remove OpenCL padding: CLWidthConcatenate2TensorsKernel
Remove padding from CLWidthConcatenate2TensorsKernel Remove padding from CLWidthConcatenate4TensorsKernel Change-Id: I2142618e87bf11f831fe3b9375c4a7efda8d3a21 Signed-off-by: Sheri Zhang <sheri.zhang@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4266 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'src/core/CL/cl_kernels')
-rw-r--r--src/core/CL/cl_kernels/concatenate.cl103
1 file changed, 50 insertions(+), 53 deletions(-)
diff --git a/src/core/CL/cl_kernels/concatenate.cl b/src/core/CL/cl_kernels/concatenate.cl
index 7c6c8d211a..19494b109f 100644
--- a/src/core/CL/cl_kernels/concatenate.cl
+++ b/src/core/CL/cl_kernels/concatenate.cl
@@ -53,7 +53,9 @@ inline VEC_QUANT requantize(VEC_QUANT input, float in_offset, float out_offset,
#error "Element size not supported"
#endif // ELEMENT_SIZE
-#if VEC_SIZE == 2
+#if VEC_SIZE == 1
+#define SEQ ((int)(0))
+#elif VEC_SIZE == 2
#define SEQ ((int2)(0, 1))
#elif VEC_SIZE == 4
#define SEQ ((int4)(0, 1, 2, 3))
@@ -69,7 +71,7 @@ inline VEC_QUANT requantize(VEC_QUANT input, float in_offset, float out_offset,
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
- * @note The offset for the first spatial dimension has to be passed at compile time using -DWIDTH_OFFSET. i.e. -DWIDTH_OFFSET=128
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
* @note Tensor depth should be given as a preprocessor argument using -DDEPTH=size. e.g. -DDEPTH=16
* @note First input tensor width should be given as a preprocessor argument using -DINPUT1_WIDTH=width. e.g. -DINPUT1_WIDTH=8
*
@@ -103,34 +105,29 @@ inline VEC_QUANT requantize(VEC_QUANT input, float in_offset, float out_offset,
* @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
* @param[in] dst_step_w output_stride_w * number of elements along W processed per workitem (in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src1_pad_right Right paddings of the first input tensor in unit of elements
- * @param[in] src1_pad_left Left paddings of the second input tensor in unit of elements
*/
__kernel void concatenate_width_x2(
TENSOR4D_DECLARATION(src1),
TENSOR4D_DECLARATION(src2),
- TENSOR4D_DECLARATION(dst),
- uint src1_pad_right,
- uint src2_pad_left)
+ TENSOR4D_DECLARATION(dst))
{
- Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT(dst, DEPTH);
-
// Calculate input indices
- const int x = get_global_id(0) * (int)VEC_SIZE;
+ const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
const int y = get_global_id(1);
const int z = get_global_id(2) % (int)DEPTH;
const int w = get_global_id(2) / (int)DEPTH;
- const int x1 = min(x, (int)INPUT1_WIDTH + (int)src1_pad_right - (int)VEC_SIZE);
- const int x2 = max(x - (int)INPUT1_WIDTH, -(int)src2_pad_left);
+ const int x1 = min(x, (int)INPUT1_WIDTH - (int)VEC_SIZE);
+ const int x2 = max(x - (int)INPUT1_WIDTH, 0);
// Calculate inputs and output addresses
- const __global uchar *in1_ptr = src1_ptr + (int)src1_offset_first_element_in_bytes + x1 * (int)src1_stride_x + y * (int)src1_stride_y + z * (int)src1_stride_z + w * (int)src1_stride_w;
- const __global uchar *in2_ptr = src2_ptr + (int)src2_offset_first_element_in_bytes + x2 * (int)src2_stride_x + y * (int)src2_stride_y + z * (int)src2_stride_z + w * (int)src2_stride_w;
+ const __global uchar *dst_addr = dst_ptr + (int)dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * (int)dst_stride_y + z * (int)dst_stride_z + w * (int)dst_stride_w;
+ const __global uchar *src1_addr = src1_ptr + (int)src1_offset_first_element_in_bytes + x1 * sizeof(DATA_TYPE) + y * (int)src1_stride_y + z * (int)src1_stride_z + w * (int)src1_stride_w;
+ const __global uchar *src2_addr = src2_ptr + (int)src2_offset_first_element_in_bytes + x2 * sizeof(DATA_TYPE) + y * (int)src2_stride_y + z * (int)src2_stride_z + w * (int)src2_stride_w;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- src1_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in1_ptr);
+ src1_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src1_addr);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- src2_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in2_ptr);
+ src2_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src2_addr);
#if defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT)
src1_values = requantize(src1_values, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT);
@@ -138,10 +135,14 @@ __kernel void concatenate_width_x2(
#endif /* defined(OFFSET_IN1) && defined(OFFSET_IN2) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_IN2) && defined(SCALE_OUT) */
const VEC_DATA_TYPE(int, VEC_SIZE) x_coords = SEQ + (VEC_DATA_TYPE(int, VEC_SIZE))(x);
const VEC_DATA_TYPE(COND_DATA_TYPE, VEC_SIZE) cond = CONVERT(x_coords < (VEC_DATA_TYPE(int, VEC_SIZE))(INPUT1_WIDTH), VEC_DATA_TYPE(COND_DATA_TYPE, VEC_SIZE));
- const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) values = select(src2_values, src1_values, cond);
- VSTORE(VEC_SIZE)
- (values, 0, (__global DATA_TYPE *)dst.ptr);
+ // Rotate src1/2_values, if values0 is a combination of src1_values and src2_values.
+ src1_values = (x < INPUT1_WIDTH && x1 == INPUT1_WIDTH - VEC_SIZE) ? ROTATE(src1_values, VEC_SIZE, INPUT1_ROTATE_N) : src1_values;
+ src2_values = (x < INPUT1_WIDTH && x1 == INPUT1_WIDTH - VEC_SIZE) ? ROTATE(src2_values, VEC_SIZE, INPUT1_ROTATE_N) : src2_values;
+
+ const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) values0 = select(src2_values, src1_values, cond);
+
+ STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
#if defined(INPUT2_WIDTH) && defined(INPUT3_WIDTH)
@@ -149,7 +150,7 @@ __kernel void concatenate_width_x2(
*
* @note The data type has to be passed at compile time using -DDATA_TYPE. i.e. -DDATA_TYPE=float
* @note Vector size has to be passed at compile time using -DVEC_SIZE. i.e. -DVEC_SIZE=16
- * @note The offset for the first spatial dimension has to be passed at compile time using -DWIDTH_OFFSET. i.e. -DWIDTH_OFFSET=128
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE
* @note Tensor depth should be given as a preprocessor argument using -DDEPTH=size. e.g. -DDEPTH=16
* @note First input tensor width should be given as a preprocessor argument using -DINPUT1_WIDTH=width. e.g. -DINPUT1_WIDTH=8
* @note Second input tensor width should be given as a preprocessor argument using -DINPUT2_WIDTH=width. e.g. -DINPUT2_WIDTH=8
@@ -205,53 +206,40 @@ __kernel void concatenate_width_x2(
* @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
* @param[in] dst_step_w output_stride_w * number of elements along W processed per workitem (in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] src1_pad_right Right paddings of the first input tensor in unit of elements
- * @param[in] src2_pad_left Left paddings of the second input tensor in unit of elements
- * @param[in] src2_pad_right Right paddings of the second input tensor in unit of elements
- * @param[in] src3_pad_left Left paddings of the third input tensor in unit of elements
- * @param[in] src3_pad_right Right paddings of the third input tensor in unit of elements
- * @param[in] src4_pad_left Left paddings of the fourth input tensor in unit of elements
*/
__kernel void concatenate_width_x4(
TENSOR4D_DECLARATION(src1),
TENSOR4D_DECLARATION(src2),
TENSOR4D_DECLARATION(src3),
TENSOR4D_DECLARATION(src4),
- TENSOR4D_DECLARATION(dst),
- uint src1_pad_right,
- uint src2_pad_left,
- uint src2_pad_right,
- uint src3_pad_left,
- uint src3_pad_right,
- uint src4_pad_left)
+ TENSOR4D_DECLARATION(dst))
{
- Tensor4D dst = CONVERT_TO_TENSOR4D_STRUCT(dst, DEPTH);
-
// Calculate input indices
- const int x = get_global_id(0) * (int)VEC_SIZE;
+ const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
const int y = get_global_id(1);
const int z = get_global_id(2) % (int)DEPTH;
const int w = get_global_id(2) / (int)DEPTH;
- const int x1 = min(x, (int)INPUT1_WIDTH + (int)src1_pad_right - (int)VEC_SIZE);
- const int x2 = min(max(x - (int)INPUT1_WIDTH, -(int)src2_pad_left), (int)INPUT2_WIDTH + (int)src2_pad_right - (int)VEC_SIZE);
- const int x3 = min(max(x - (int)INPUT1_WIDTH - (int)INPUT2_WIDTH, -(int)src3_pad_left), (int)INPUT3_WIDTH + (int)src3_pad_right - (int)VEC_SIZE);
- const int x4 = max(x - (int)INPUT1_WIDTH - (int)INPUT2_WIDTH - (int)INPUT3_WIDTH, -(int)src4_pad_left);
+ const int x1 = min(x, (int)INPUT1_WIDTH - (int)VEC_SIZE);
+ const int x2 = min(max(x - (int)INPUT1_WIDTH, 0), (int)INPUT2_WIDTH - (int)VEC_SIZE);
+ const int x3 = min(max(x - (int)INPUT1_WIDTH - (int)INPUT2_WIDTH, 0), (int)INPUT3_WIDTH - (int)VEC_SIZE);
+ const int x4 = max(x - (int)INPUT1_WIDTH - (int)INPUT2_WIDTH - (int)INPUT3_WIDTH, 0);
// Calculate inputs and output addresses
- const __global uchar *in1_ptr = src1_ptr + (int)src1_offset_first_element_in_bytes + x1 * (int)src1_stride_x + y * (int)src1_stride_y + z * (int)src1_stride_z + w * (int)src1_stride_w;
- const __global uchar *in2_ptr = src2_ptr + (int)src2_offset_first_element_in_bytes + x2 * (int)src2_stride_x + y * (int)src2_stride_y + z * (int)src2_stride_z + w * (int)src2_stride_w;
- const __global uchar *in3_ptr = src3_ptr + (int)src3_offset_first_element_in_bytes + x3 * (int)src3_stride_x + y * (int)src3_stride_y + z * (int)src3_stride_z + w * (int)src3_stride_w;
- const __global uchar *in4_ptr = src4_ptr + (int)src4_offset_first_element_in_bytes + x4 * (int)src4_stride_x + y * (int)src4_stride_y + z * (int)src4_stride_z + w * (int)src4_stride_w;
+ const __global uchar *dst_addr = dst_ptr + (int)dst_offset_first_element_in_bytes + x * sizeof(DATA_TYPE) + y * (int)dst_stride_y + z * (int)dst_stride_z + w * (int)dst_stride_w;
+ const __global uchar *src1_addr = src1_ptr + (int)src1_offset_first_element_in_bytes + x1 * sizeof(DATA_TYPE) + y * (int)src1_stride_y + z * (int)src1_stride_z + w * (int)src1_stride_w;
+ const __global uchar *src2_addr = src2_ptr + (int)src2_offset_first_element_in_bytes + x2 * sizeof(DATA_TYPE) + y * (int)src2_stride_y + z * (int)src2_stride_z + w * (int)src2_stride_w;
+ const __global uchar *src3_addr = src3_ptr + (int)src3_offset_first_element_in_bytes + x3 * sizeof(DATA_TYPE) + y * (int)src3_stride_y + z * (int)src3_stride_z + w * (int)src3_stride_w;
+ const __global uchar *src4_addr = src4_ptr + (int)src4_offset_first_element_in_bytes + x4 * sizeof(DATA_TYPE) + y * (int)src4_stride_y + z * (int)src4_stride_z + w * (int)src4_stride_w;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- src1_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in1_ptr);
+ src1_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src1_addr);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- src2_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in2_ptr);
+ src2_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src2_addr);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- src3_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in3_ptr);
+ src3_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src3_addr);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- src4_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in4_ptr);
+ src4_values = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)src4_addr);
#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) && defined(OFFSET_IN2) && defined(SCALE_IN2) && defined(OFFSET_IN3) && defined(SCALE_IN3) && defined(OFFSET_IN4) && defined(SCALE_IN4)
src1_values = requantize(src1_values, OFFSET_IN1, OFFSET_OUT, SCALE_IN1, SCALE_OUT);
@@ -266,13 +254,22 @@ __kernel void concatenate_width_x4(
const VEC_DATA_TYPE(COND_DATA_TYPE, VEC_SIZE) cond_in3 = CONVERT(x_coords < (VEC_DATA_TYPE(int, VEC_SIZE))(INPUT1_WIDTH + INPUT2_WIDTH), VEC_DATA_TYPE(COND_DATA_TYPE, VEC_SIZE));
const VEC_DATA_TYPE(COND_DATA_TYPE, VEC_SIZE) cond_in4 = CONVERT(x_coords < (VEC_DATA_TYPE(int, VEC_SIZE))(INPUT1_WIDTH + INPUT2_WIDTH + INPUT3_WIDTH), VEC_DATA_TYPE(COND_DATA_TYPE, VEC_SIZE));
+ // Rotate src1/2_values, if values0 is a combination of src1_values and src2_values.
+ src1_values = (x < INPUT1_WIDTH && x1 == INPUT1_WIDTH - VEC_SIZE) ? ROTATE(src1_values, VEC_SIZE, INPUT1_ROTATE_N) : src1_values;
+ src2_values = (x < INPUT1_WIDTH && x1 == INPUT1_WIDTH - VEC_SIZE) ? ROTATE(src2_values, VEC_SIZE, INPUT1_ROTATE_N) : src2_values;
+ // Rotate src2/3_values, if values0 is a combination of src2_values and src3_values.
+ src2_values = (x < INPUT1_WIDTH + INPUT2_WIDTH && x2 == INPUT2_WIDTH - VEC_SIZE) ? ROTATE(src2_values, VEC_SIZE, INPUT2_ROTATE_N) : src2_values;
+ src3_values = (x < INPUT1_WIDTH + INPUT2_WIDTH && x2 == INPUT2_WIDTH - VEC_SIZE) ? ROTATE(src3_values, VEC_SIZE, INPUT2_ROTATE_N) : src3_values;
+ // Rotate src3/4_values, if values0 is a combination of src3_values and src4_values.
+ src3_values = (x < INPUT1_WIDTH + INPUT2_WIDTH + INPUT3_WIDTH && x3 == INPUT3_WIDTH - VEC_SIZE) ? ROTATE(src3_values, VEC_SIZE, INPUT3_ROTATE_N) : src3_values;
+ src4_values = (x < INPUT1_WIDTH + INPUT2_WIDTH + INPUT3_WIDTH && x3 == INPUT3_WIDTH - VEC_SIZE) ? ROTATE(src4_values, VEC_SIZE, INPUT3_ROTATE_N) : src4_values;
+
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values = select(src2_values, src1_values, cond_in2);
- values = select(src3_values, values, cond_in3);
- values = select(src4_values, values, cond_in4);
+ values0 = select(src2_values, src1_values, cond_in2);
+ values0 = select(src3_values, values0, cond_in3);
+ values0 = select(src4_values, values0, cond_in4);
- VSTORE(VEC_SIZE)
- (values, 0, (__global DATA_TYPE *)dst.ptr);
+ STORE_VECTOR_SELECT(values, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
#endif /* defined(INPUT2_WIDTH) && defined(INPUT3_WIDTH) */
#endif /* defined(INPUT1_WIDTH) */