author     Giorgio Arena <giorgio.arena@arm.com>   2021-09-01 14:05:00 +0100
committer  Giorgio Arena <giorgio.arena@arm.com>   2021-09-03 14:04:19 +0000
commit     8fce496a715929372b3c448a233713d87d65f768 (patch)
tree       283841880dd0c969addda1c08f50fc6e622ff07d
parent     b8025b3bb1b75fa94400a665e65a1d53ba9965f9 (diff)
download   ComputeLibrary-8fce496a715929372b3c448a233713d87d65f768.tar.gz
Remove padding from ClPool2dKernel NCHW
- Simplify NCHW kernel structure by removing old optimized paths
- Merge quantized with fp kernels

Resolve COMPMID-4722

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: I79016b119619aed6a6193295601cd6517f14b88c
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6183
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
-rw-r--r--  SConscript | 2
-rw-r--r--  src/core/CL/cl_kernels/common/pooling_layer.cl | 390
-rw-r--r--  src/core/CL/cl_kernels/nchw/pooling_layer.cl | 328
-rw-r--r--  src/core/CL/cl_kernels/nchw/pooling_layer_quantized.cl | 142
-rw-r--r--  src/gpu/cl/ClKernelLibrary.cpp | 16
-rw-r--r--  src/gpu/cl/kernels/ClPool2dKernel.cpp | 255
-rw-r--r--  src/gpu/cl/kernels/ClPool2dKernel.h | 2
-rw-r--r--  src/gpu/cl/operators/ClPool2d.cpp | 52
-rw-r--r--  src/gpu/cl/operators/ClPool2d.h | 8
9 files changed, 197 insertions, 998 deletions
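Editor's note on the change: the patch removes the kernel's reliance on tensor border padding. Instead of reading past the valid region into pre-padded memory, the rewritten pooling_layer_MxN_nchw clamps its source coordinates and masks out-of-bounds contributions inside the kernel. The following is a rough host-side mental model only (scalar C++, illustrative names, not the library's code) of padding-free max pooling over one NCHW plane:

// Minimal host-side reference (not the OpenCL kernel) illustrating the idea of
// the change: boundary handling is done arithmetically in the kernel instead of
// relying on an input tensor allocated with padded borders.
#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

// Max-pools one plane of size src_h x src_w with a pool_h x pool_w window.
// Out-of-bounds taps are simply skipped, so no border allocation is needed.
std::vector<float> max_pool_plane(const std::vector<float> &src, int src_w, int src_h,
                                  int pool_w, int pool_h, int stride_x, int stride_y,
                                  int pad_x, int pad_y, int dst_w, int dst_h)
{
    std::vector<float> dst(dst_w * dst_h, -std::numeric_limits<float>::infinity());
    for(int oy = 0; oy < dst_h; ++oy)
    {
        for(int ox = 0; ox < dst_w; ++ox)
        {
            const int x0 = ox * stride_x - pad_x; // top-left corner of the window,
            const int y0 = oy * stride_y - pad_y; // may be negative because of padding
            float res = -std::numeric_limits<float>::infinity();
            for(int ky = 0; ky < pool_h; ++ky)
            {
                for(int kx = 0; kx < pool_w; ++kx)
                {
                    const int x = x0 + kx;
                    const int y = y0 + ky;
                    if(x < 0 || y < 0 || x >= src_w || y >= src_h)
                    {
                        continue; // padding position: contributes nothing to max pooling
                    }
                    res = std::max(res, src[y * src_w + x]);
                }
            }
            dst[oy * dst_w + ox] = res;
        }
    }
    return dst;
}

int main()
{
    // 4x4 plane, 2x2 max pool, stride 2, no padding -> 2x2 output
    const std::vector<float> src = { 1, 2, 3, 4,
                                     5, 6, 7, 8,
                                     9, 10, 11, 12,
                                     13, 14, 15, 16 };
    const auto dst = max_pool_plane(src, 4, 4, 2, 2, 2, 2, 0, 0, 2, 2);
    for(float v : dst)
    {
        std::printf("%g ", v); // expected: 6 8 14 16
    }
    std::printf("\n");
    return 0;
}

Average and L2 pooling follow the same pattern in the rewritten kernel, except that out-of-bounds taps contribute zero to the accumulation (the select() on cond_x in the hunk below) and the divisor depends on EXCLUDE_PADDING.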
diff --git a/SConscript b/SConscript
index 62ff266d34..df8f33a917 100644
--- a/SConscript
+++ b/SConscript
@@ -291,7 +291,6 @@ if env['opencl'] and env['embed_kernels']:
'src/core/CL/cl_kernels/common/quantization_layer.cl',
'src/core/CL/cl_kernels/common/range.cl',
'src/core/CL/cl_kernels/common/reduction_operation.cl',
- 'src/core/CL/cl_kernels/common/pooling_layer.cl',
'src/core/CL/cl_kernels/common/reshape_layer.cl',
'src/core/CL/cl_kernels/common/convolution_layer.cl',
'src/core/CL/cl_kernels/common/reverse.cl',
@@ -322,7 +321,6 @@ if env['opencl'] and env['embed_kernels']:
'src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer.cl',
'src/core/CL/cl_kernels/nchw/normalize_planar_yuv_layer_quantized.cl',
'src/core/CL/cl_kernels/nchw/pooling_layer.cl',
- 'src/core/CL/cl_kernels/nchw/pooling_layer_quantized.cl',
'src/core/CL/cl_kernels/nchw/prior_box_layer.cl',
'src/core/CL/cl_kernels/nchw/remap.cl',
'src/core/CL/cl_kernels/nchw/reorg_layer.cl',
diff --git a/src/core/CL/cl_kernels/common/pooling_layer.cl b/src/core/CL/cl_kernels/common/pooling_layer.cl
deleted file mode 100644
index 5122f2c251..0000000000
--- a/src/core/CL/cl_kernels/common/pooling_layer.cl
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-#include "repeat.h"
-#include "tile_helpers.h"
-
-#if defined(POOL_AVG) || defined(POOL_L2)
-#define POOL_OP(x, y) ((x) + (y))
-#else /* defined(POOL_AVG) || defined(POOL_L2) */
-#define POOL_OP(x, y) (fmax((x), (y)))
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
-#define POW2_OP(x, vec_size) ((x) * (x))
-#else /* defined(POOL_L2) */
-#define POW2_OP(x, vec_size) (x)
-#endif /* defined(POOL_L2) */
-
-#define DIV_OP(x, y) (x * (1.f / y))
-#define SQRT_OP(x) sqrt((x))
-
-#if STRIDE_X == 1
-#define POOLING3x3(res, input, output) POOLING3x3_STRIDE1(res, input, output)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define POOLING3x3(res, input, output) POOLING3x3_STRIDE2(res, input, output)
-#elif STRIDE_X == 3 /* STRIDE_X not equals 1 or 2 */
-#define POOLING3x3(res, input, output) POOLING3x3_STRIDE3(res, input, output)
-#endif /* STRIDE_X == 3 */
-
-#if defined(FP_MIXED_PRECISION)
-#define CONVERT_TO_ACC_DATA_TYPE(x, n) CONVERT(x, VEC_DATA_TYPE(ACC_DATA_TYPE, n))
-#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) \
- CONVERT_TO_ACC_DATA_TYPE(vload##n(offset, ptr), n)
-#else /* defined(FP_MIXED_PRECISION) */
-#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) vload##n(offset, ptr)
-#endif /* defined(FP_MIXED_PRECISION) */
-
-#define POOLING3x3_STRIDE1(res, input, output) \
- ({ \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data00 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2) \
- data01 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0) + 4); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data10 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2) \
- data11 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0) + 4); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data20 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2) \
- data21 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0) + 4); \
- data00 = POW2_OP(data00, 4); \
- data01 = POW2_OP(data01, 2); \
- data10 = POW2_OP(data10, 4); \
- data11 = POW2_OP(data11, 2); \
- data20 = POW2_OP(data20, 4); \
- data21 = POW2_OP(data21, 2); \
- \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values00 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data00.s01212323); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values01 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data01.s0, data00.s3, data01.s01); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values10 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data10.s01212323); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values11 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data11.s0, data10.s3, data11.s01); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values20 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data20.s01212323); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values21 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data21.s0, data20.s3, data21.s01); \
- \
- values00 = POOL_OP(values00, values10); \
- values01 = POOL_OP(values01, values11); \
- values00 = POOL_OP(values00, values20); \
- values01 = POOL_OP(values01, values21); \
- \
- res = POOL_OP((VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s036, values01.s1), (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s147, values01.s2)); \
- res = POOL_OP(res, (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s25, values01.s03)); \
- })
-
-#define POOLING3x3_STRIDE2(res, input, output) \
- ({ \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data00 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)); \
- ACC_DATA_TYPE data01 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0) + 8)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data10 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); \
- ACC_DATA_TYPE data11 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0) + 8)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data20 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); \
- ACC_DATA_TYPE data21 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0) + 8)); \
- data00 = POW2_OP(data00, 8); \
- data01 = POW2_OP(data01, 1); \
- data10 = POW2_OP(data10, 8); \
- data11 = POW2_OP(data11, 1); \
- data20 = POW2_OP(data20, 8); \
- data21 = POW2_OP(data21, 1); \
- \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values00 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data00.s01223445); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values01 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s667, data01); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values10 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data10.s01223445); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values11 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data10.s667, data11); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- values20 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))(data20.s01223445); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- values21 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data20.s667, data21); \
- \
- values00 = POOL_OP(values00, values10); \
- values01 = POOL_OP(values01, values11); \
- values00 = POOL_OP(values00, values20); \
- values01 = POOL_OP(values01, values21); \
- \
- res = POOL_OP((VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s036, values01.s1), (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s147, values01.s2)); \
- res = POOL_OP(res, (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(values00.s25, values01.s03)); \
- })
-
-#define POOLING3x3_STRIDE3(res, input, output) \
- ({ \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data00 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data01 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0) + 8); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data10 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data11 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0) + 8); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8) \
- data20 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); \
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4) \
- data21 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(4, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0) + 8); \
- data00 = POW2_OP(data00, 8); \
- data01 = POW2_OP(data01, 4); \
- data10 = POW2_OP(data10, 8); \
- data11 = POW2_OP(data11, 4); \
- data20 = POW2_OP(data20, 8); \
- data21 = POW2_OP(data21, 4); \
- \
- data00 = POOL_OP(data00, data10); \
- data01 = POOL_OP(data01, data11); \
- data00 = POOL_OP(data00, data20); \
- data01 = POOL_OP(data01, data21); \
- \
- res = POOL_OP((VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s036, data01.s1), (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s147, data01.s2)); \
- res = POOL_OP(res, (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(data00.s25, data01.s03)); \
- })
-
-ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
- const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x = get_global_id(0) * stride_x - pad_x;
- int start_y = get_global_id(1) * stride_y - pad_y;
- const int end_x = min(start_x + pool_size_x, upper_bound_w);
- const int end_y = min(start_y + pool_size_y, upper_bound_h);
-#if defined(EXCLUDE_PADDING)
- start_x = max(0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- return ((end_y - start_y) * (end_x - start_x));
-}
-
-/** Performs a pooling function of pool size equal to 2.
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG or -DPOOL_L2 must be provided otherwise max pooling will be performed.
- * -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indeces in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_2(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- // Load data
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
- data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 2)
- data1 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(2, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 = POW2_OP(data0, 2);
- data1 = POW2_OP(data1, 2);
-#endif /* defined(POOL_L2) */
-
- // Perform calculations
- data0 = POOL_OP(data0, data1);
- ACC_DATA_TYPE res = POOL_OP(data0.s0, data0.s1);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average or l2 pooling
- res = DIV_OP(res, calculate_avg_scale(2, 2, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- *(__global DATA_TYPE *)output.ptr = (DATA_TYPE)res;
-}
-
-/** Performs a pooling function of pool size equal to 3
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG or -DPOOL_L2 must be provided otherwise max pooling will be performed.
- * -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indeces in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_3(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- // Load data
- VEC_DATA_TYPE(ACC_DATA_TYPE, 3)
- data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(3, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 3)
- data1 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(3, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0));
- VEC_DATA_TYPE(ACC_DATA_TYPE, 3)
- data2 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(3, 0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 = POW2_OP(data0, 3);
- data1 = POW2_OP(data1, 3);
- data2 = POW2_OP(data2, 3);
-#endif /* defined(POOL_L2) */
-
- // Perform calculations
- data0 = POOL_OP(data0, data1);
- data0 = POOL_OP(data0, data2);
- ACC_DATA_TYPE res = POOL_OP(POOL_OP(data0.s0, data0.s1), data0.s2);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average pooling
- res = DIV_OP(res, calculate_avg_scale(3, 3, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- *(__global DATA_TYPE *)output.ptr = (DATA_TYPE)res;
-}
-
-#if defined(POOLING3x3)
-
-#define CONVERT_OP(data_type) convert_##data_type##4
-#define CONVERT_VECTOR4(data_type) CONVERT_OP(data_type)
-
-VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
-calculate_avg_scale4(const int pool_size, const int upper_bound_w, const int upper_bound_h,
- const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int4 start_x = ((int4)get_global_id(0) * 4 + (int4)(0, 1, 2, 3)) * (int4)stride_x - (int4)pad_x;
- int start_y = get_global_id(1) * stride_y - pad_y;
- const int4 end_x = min(start_x + (int4)pool_size, (int4)upper_bound_w);
- const int end_y = min(start_y + pool_size, upper_bound_h);
-#if defined(EXCLUDE_PADDING)
- start_x = max((int4)0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- return (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))(1.f) / CONVERT_VECTOR4(ACC_DATA_TYPE)(((int4)(end_y - start_y)) * (end_x - start_x));
-}
-
-/** Performs an optimized pooling function of pool size equal to 3 when the stride_x is less equal than 3
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG or -DPOOL_L2 must be provided otherwise max pooling will be performed.
- * -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indeces in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- */
-__kernel void pooling_layer_optimized_3(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- VEC_DATA_TYPE(ACC_DATA_TYPE, 4)
- res;
-
- // Perform pooling 3x3 for 4 output elements
- POOLING3x3(res, input, output);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- // Divide by pool region in case of average pooling
- res *= calculate_avg_scale4(3, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y);
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- vstore4(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 4)), 0, (__global DATA_TYPE *)output.ptr);
-}
-#endif // defined(POOLING3x3)
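Editor's note: the common kernel deleted above carried its own copy of calculate_avg_scale, and the same helper survives unchanged in the NCHW file below. Its role is to produce the divisor for average/L2 pooling: the window is clipped to the accessible region, and with -DEXCLUDE_PADDING the start coordinates are clamped to zero so padded positions do not count. A standalone sketch of that arithmetic (illustrative C++, not the kernel itself):

// Hedged sketch of the averaging divisor computed by calculate_avg_scale.
#include <algorithm>
#include <cstdio>

int avg_pool_divisor(int out_x, int out_y, int pool_w, int pool_h,
                     int upper_bound_w, int upper_bound_h,
                     int pad_x, int pad_y, int stride_x, int stride_y,
                     bool exclude_padding)
{
    int start_x = out_x * stride_x - pad_x;
    int start_y = out_y * stride_y - pad_y;
    const int end_x = std::min(start_x + pool_w, upper_bound_w);
    const int end_y = std::min(start_y + pool_h, upper_bound_h);
    if(exclude_padding)
    {
        // Padded positions are excluded from the divisor as well as from the sum
        start_x = std::max(0, start_x);
        start_y = std::max(0, start_y);
    }
    return (end_y - start_y) * (end_x - start_x);
}

int main()
{
    // 3x3 window at the top-left corner with 1 pixel of padding on each side:
    // only a 2x2 patch of real data is covered when padding is excluded.
    std::printf("%d\n", avg_pool_divisor(0, 0, 3, 3, /*upper_w*/ 5, /*upper_h*/ 5,
                                         1, 1, 1, 1, /*exclude_padding*/ true)); // prints 4
    std::printf("%d\n", avg_pool_divisor(0, 0, 3, 3, 5, 5,
                                         1, 1, 1, 1, false));                    // prints 9
    return 0;
}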
diff --git a/src/core/CL/cl_kernels/nchw/pooling_layer.cl b/src/core/CL/cl_kernels/nchw/pooling_layer.cl
index 790ddb381a..15ad116289 100644
--- a/src/core/CL/cl_kernels/nchw/pooling_layer.cl
+++ b/src/core/CL/cl_kernels/nchw/pooling_layer.cl
@@ -22,13 +22,15 @@
* SOFTWARE.
*/
#include "helpers.h"
-#include "repeat.h"
-#include "tile_helpers.h"
#if defined(POOL_AVG) || defined(POOL_L2)
#define POOL_OP(x, y) ((x) + (y))
#else /* defined(POOL_AVG) || defined(POOL_L2) */
+#if defined(QUANTIZED)
+#define POOL_OP(x, y) (max((x), (y)))
+#else // defined(QUANTIZED)
#define POOL_OP(x, y) (fmax((x), (y)))
+#endif // defined(QUANTIZED)
#endif /* defined(POOL_AVG) || defined(POOL_L2) */
#if defined(POOL_L2)
@@ -40,13 +42,12 @@
#define DIV_OP(x, y) (x * (1.f / y))
#define SQRT_OP(x) sqrt((x))
-#if defined(FP_MIXED_PRECISION)
+#if defined(FP_MIXED_PRECISION) || defined(QUANTIZED)
#define CONVERT_TO_ACC_DATA_TYPE(x, n) CONVERT(x, VEC_DATA_TYPE(ACC_DATA_TYPE, n))
-#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) \
- CONVERT_TO_ACC_DATA_TYPE(vload##n(offset, ptr), n)
-#else /* defined(FP_MIXED_PRECISION) */
+#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) CONVERT_TO_ACC_DATA_TYPE(vload##n(offset, ptr), n)
+#else /* defined(FP_MIXED_PRECISION) || defined(QUANTIZED)*/
#define VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(n, offset, ptr) vload##n(offset, ptr)
-#endif /* defined(FP_MIXED_PRECISION) */
+#endif /* defined(FP_MIXED_PRECISION) || defined(QUANTIZED)*/
ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
const int pad_x, const int pad_y, const int stride_x, const int stride_y)
@@ -66,7 +67,7 @@ ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y,
/** Performs a pooling function of pool size equal to N (NCHW)
*
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32;
+ * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=float. Supported data types are F16/F32/QASYMM8;
* @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
* @note In case of average pooling the following information must be passed at compile time:
* -DPOOL_AVG must be provided otherwise max pooling will be performed.
@@ -75,59 +76,93 @@ ACC_DATA_TYPE calculate_avg_scale(const int pool_size_x, const int pool_size_y,
* -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
* @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
*
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32/QASYMM8
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void pooling_layer_MxN_nchw(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst))
{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+ int id0 = get_global_id(0);
+ int id1 = get_global_id(1);
+ int id2 = get_global_id(2);
+
+ int x_coords = (id0 * STRIDE_X) - PAD_X;
+ int y_coords = (id1 * STRIDE_Y) - PAD_Y;
+
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + y_coords * (int)src_stride_y + id2 * src_stride_z;
VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
vdata = INITIAL_VALUE;
ACC_DATA_TYPE sdata = INITIAL_VALUE;
+ const int end_x = min((int)POOL_SIZE_X, (int)(SRC_WIDTH - x_coords));
+ const int end_y = min((int)POOL_SIZE_Y, (int)(SRC_HEIGHT - y_coords));
+
// Load data
- for(int y = 0; y < POOL_SIZE_Y; y++)
+ for(int y = 0; y < end_y; ++y)
{
- int x = 0;
- for(; x <= ((int)POOL_SIZE_X - 8); x += 8)
+ if((y_coords + y) >= 0)
{
- VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
- data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0));
+ int x = 0;
+ for(; x <= (end_x - 8); x += 8)
+ {
+ int8 src_x = (int8)(x_coords + x) + VEC_OFFS(int, 8);
+#if defined(POOL_AVG) || defined(POOL_L2)
+ SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ cond_x = CONVERT(src_x < 0, SELECT_VEC_DATA_TYPE(ACC_DATA_TYPE, 8));
+ src_x = clamp(src_x, (int8)0, (int8)(SRC_WIDTH - 1));
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ data0 = select(VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)(src_addr + src_x.s0 * sizeof(DATA_TYPE) + y * src_stride_y)), (VEC_DATA_TYPE(ACC_DATA_TYPE, 8))0, REVERSE(cond_x, 8));
+#else // defined(POOL_AVG) || defined(POOL_L2)
+ src_x = clamp(src_x, 0, SRC_WIDTH - 1);
+ VEC_DATA_TYPE(ACC_DATA_TYPE, 8)
+ data0 = VLOAD_AND_CONVERT_TO_ACC_DATA_TYPE(8, 0, (__global DATA_TYPE *)(src_addr + src_x.s0 * sizeof(DATA_TYPE) + y * src_stride_y));
+#endif // defined(POOL_AVG) || defined(POOL_L2
+
#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 *= data0;
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
#endif /* defined(POOL_L2) */
- vdata = POOL_OP(vdata, data0);
- }
- // Leftover
- for(; x < (int)POOL_SIZE_X; ++x)
- {
- ACC_DATA_TYPE data0 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0)));
+ vdata = POOL_OP(vdata, data0);
+ }
+
+ // Leftover
+ for(; x < end_x; ++x)
+ {
+ int src_x = x_coords + x;
+#if defined(POOL_AVG) || defined(POOL_L2)
+ SELECT_DATA_TYPE(ACC_DATA_TYPE)
+ cond_x = (src_x < 0);
+ src_x = clamp(src_x, 0, SRC_WIDTH - 1);
+ ACC_DATA_TYPE data0 = select((ACC_DATA_TYPE)(*((__global DATA_TYPE *)(src_addr + src_x * sizeof(DATA_TYPE) + y * src_stride_y))), (ACC_DATA_TYPE)0, cond_x);
+#else // defined(POOL_AVG) || defined(POOL_L2)
+ src_x = clamp(src_x, 0, SRC_WIDTH - 1);
+ ACC_DATA_TYPE data0 = (ACC_DATA_TYPE)(*((__global DATA_TYPE *)(src_addr + src_x * sizeof(DATA_TYPE) + y * src_stride_y)));
+#endif // defined(POOL_AVG) || defined(POOL_L2)
+
#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data0 *= data0;
+ // Raise to power of 2 for L2 Pooling
+ data0 *= data0;
#endif /* defined(POOL_L2) */
- sdata = POOL_OP(sdata, data0);
+
+ sdata = POOL_OP(sdata, data0);
+ }
}
}
@@ -144,76 +179,61 @@ __kernel void pooling_layer_MxN_nchw(
res = DIV_OP(res, calculate_avg_scale(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y));
#endif /* defined(POOL_AVG) || defined(POOL_L2) */
+#if defined(QUANTIZED)
+
+ DATA_TYPE result_q8 = CONVERT(res, DATA_TYPE);
+
+#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
+
+ const float result_f32 = convert_float(result_q8);
+ const float input_offset = (float)OFFSET_IN1;
+ const float input_scale = (float)SCALE_IN1;
+ const float scale_out = (float)SCALE_OUT;
+ const float offset_out = (float)OFFSET_OUT;
+ const float in_f32 = (result_f32 - input_offset) * input_scale;
+ const float out_f32 = in_f32 / scale_out + offset_out;
+ result_q8 = CONVERT_SAT(convert_int_rte(out_f32), DATA_TYPE);
+
+#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
+
+ *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = result_q8;
+
+#else // defined(QUANTIZED)
+
#if defined(POOL_L2)
// Take square root of the result in L2 pooling
res = SQRT_OP(res);
#endif /* defined(POOL_L2) */
// Store result
- *(__global DATA_TYPE *)output.ptr = (DATA_TYPE)res;
+ *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = (DATA_TYPE)res;
+#endif // defined(QUANTIZED)
}
#endif // defined(POOL_SIZE_X) && defined(POOL_SIZE_Y)
-#if defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-
-inline void offset_no_padding_nchw(const Tensor3D *input, uint *offset_top, uint *offset_bottom)
-{
- const int pad_horiz = PAD_TENSOR_LEFT + PAD_TENSOR_RIGHT;
- const int pad_vert = PAD_TENSOR_TOP + PAD_TENSOR_BOTTOM;
-
- const int x = get_global_id(0) * STRIDE_X;
- const int y = get_global_id(1) * STRIDE_Y;
- const int z = get_global_id(2);
-
- //x axis: width, y axis: height, z axis: component
- const uint padded_offset = input->offset_first_element_in_bytes
- + x * input->stride_x
- + y * input->stride_y
- + z * input->stride_z;
-
- const uint offset_base = padded_offset
- - y * pad_horiz * sizeof(DATA_TYPE) /* Horizontal padding for each row */
- - PAD_TENSOR_TOP * input->stride_y /* top padding */
- - z * MAX_HEIGHT * pad_horiz * sizeof(DATA_TYPE) - z * pad_vert * input->stride_y /* Z plane padding */
- - PAD_TENSOR_LEFT * sizeof(DATA_TYPE);
-
-#if defined(TENSOR_CHANNEL) && defined(TENSOR_WIDTH) && defined(TENSOR_HEIGHT)
- *offset_top = (uint)((offset_base / sizeof(DATA_TYPE)) % (TENSOR_CHANNEL * TENSOR_WIDTH * TENSOR_HEIGHT));
-#else /* defined(TENSOR_CHANNEL) && defined(TENSOR_WIDTH) && defined(TENSOR_HEIGHT) */
- *offset_top = (uint)(offset_base / sizeof(DATA_TYPE));
-#endif /* defined(TENSOR_CHANNEL) && defined(TENSOR_WIDTH) && defined(TENSOR_HEIGHT) */
-
- *offset_bottom = *offset_top + input->stride_y / sizeof(DATA_TYPE) - pad_horiz;
-
- return;
-}
-
-#endif //defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-
/** Performs a MAX pooling of pool size equal to 2, and record max value indices for NCHW.
*
* @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F32
* @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
* @note Tensors width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
* @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * @note Tensor padding values must be passed at compile time using PAD_TENSOR_LEFT, PAD_TENSOR_RIGHT, PAD_TENSOR_TOP and PAD_TENSOR_BOTTOM
*
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
* @param[in] indices_ptr Pointer to the indices tensor. Supported data types: U32
* @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
* @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
@@ -223,109 +243,43 @@ inline void offset_no_padding_nchw(const Tensor3D *input, uint *offset_top, uint
* @param[in] indices_step_z indices_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
*/
-__kernel void pooling_layer_2_nchw_indices_fp32(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output),
+__kernel void pooling_layer_2_nchw_indices(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
TENSOR3D_DECLARATION(indices))
{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
- Tensor3D indices = CONVERT_TO_TENSOR3D_STRUCT(indices);
-
- // Load data
- float2 data0 = VLOAD(2)(0, (__global float *)tensor3D_offset(&input, 0, 0, 0));
- float2 data1 = VLOAD(2)(0, (__global float *)tensor3D_offset(&input, 0, 1, 0));
-
- // Perform calculations
- float data0_max = POOL_OP(data0.s0, data0.s1);
- float data1_max = POOL_OP(data1.s0, data1.s1);
- float res = POOL_OP(data0_max, data1_max);
- // Store result
- *(__global float *)output.ptr = res;
+ int id0 = get_global_id(0);
+ int id1 = get_global_id(1);
+ int id2 = get_global_id(2);
-#if defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
+ int2 x_coords = clamp((int2)((id0 * STRIDE_X) - PAD_X), (int2)0, (int2)(SRC_WIDTH - 1));
+ int2 y_coords = clamp((int2)((id1 * STRIDE_Y) - PAD_Y) + VEC_OFFS(int, 2), (int2)0, (int2)(SRC_HEIGHT - 1));
- uint offset_top = 0;
- uint offset_bottom = 0;
-
- offset_no_padding_nchw(&input, &offset_top, &offset_bottom);
-
- uint index0 = select(offset_top + 1, offset_top, isgreaterequal(data0.s0, data0.s1));
- uint index1 = select(offset_bottom + 1, offset_bottom, isgreaterequal(data1.s0, data1.s1));
- uint index = select(index1, index0, isgreaterequal(data0_max, data1_max));
-
- *(__global uint *)indices.ptr = index;
-
-#endif //defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
-}
-
-/** Performs a MAX pooling of pool size equal to 2, and record max value indices for NCHW.
- *
- * @note Datatype must be passed using -DDATA_TYPE e.g. -DDATA_TYPE=half. Supported data types are F16
- * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
- * @note Tensors width and height must be passed at compile time using -DMAX_WIDTH and -DMAX_HEIGHT
- * @note Pool strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * @note Tensor padding values must be passed at compile time using PAD_TENSOR_LEFT, PAD_TENSOR_RIGHT, PAD_TENSOR_TOP and PAD_TENSOR_BOTTOM
- *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] indices_ptr Pointer to the indices tensor. Supported data types: U32
- * @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
- * @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] indices_stride_y Stride of the indices tensor in Y dimension (in bytes)
- * @param[in] indices_step_y indices_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] indices_stride_z Stride of the indices tensor in Z dimension (in bytes)
- * @param[in] indices_step_z indices_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
- */
-__kernel void pooling_layer_2_nchw_indices_fp16(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output),
- TENSOR3D_DECLARATION(indices))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
- Tensor3D indices = CONVERT_TO_TENSOR3D_STRUCT(indices);
+ __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + id2 * src_stride_z;
// Load data
- half2 data0 = VLOAD(2)(0, (__global half *)tensor3D_offset(&input, 0, 0, 0));
- half2 data1 = VLOAD(2)(0, (__global half *)tensor3D_offset(&input, 0, 1, 0));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ data0 = VLOAD(2)(0, (__global DATA_TYPE *)(src_addr + x_coords.s0 * sizeof(DATA_TYPE) + y_coords.s0 * (int)src_stride_y));
+ VEC_DATA_TYPE(DATA_TYPE, 2)
+ data1 = VLOAD(2)(0, (__global DATA_TYPE *)(src_addr + x_coords.s1 * sizeof(DATA_TYPE) + y_coords.s1 * (int)src_stride_y));
// Perform calculations
- half data0_max = POOL_OP(data0.s0, data0.s1);
- half data1_max = POOL_OP(data1.s0, data1.s1);
- half res = POOL_OP(data0_max, data1_max);
+ DATA_TYPE data0_max = POOL_OP(data0.s0, data0.s1);
+ DATA_TYPE data1_max = POOL_OP(data1.s0, data1.s1);
+ DATA_TYPE res = POOL_OP(data0_max, data1_max);
// Store result
- *(__global half *)output.ptr = res;
-
-#if defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
+ *(__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + id0 * sizeof(DATA_TYPE) + id1 * dst_stride_y + id2 * dst_stride_z) = res;
- uint offset_top = 0;
- uint offset_bottom = 0;
+#if defined(SRC_BATCH)
- offset_no_padding_nchw(&input, &offset_top, &offset_bottom);
+ uint offset_top = (x_coords.s0 + y_coords.s0 * SRC_WIDTH + id2 * (SRC_WIDTH * SRC_HEIGHT)) % SRC_BATCH;
+ uint offset_bottom = offset_top + SRC_WIDTH;
uint index0 = select(offset_top + 1, offset_top, isgreaterequal(data0.s0, data0.s1));
uint index1 = select(offset_bottom + 1, offset_bottom, isgreaterequal(data1.s0, data1.s1));
uint index = select(index1, index0, isgreaterequal(data0_max, data1_max));
- *(__global uint *)indices.ptr = index;
+ *(__global uint *)(indices_ptr + indices_offset_first_element_in_bytes + id0 * sizeof(uint) + id1 * indices_stride_y + id2 * indices_stride_z) = index;
-#endif //defined(PAD_TENSOR_LEFT) && defined(PAD_TENSOR_RIGHT) && defined(PAD_TENSOR_TOP) && defined(PAD_TENSOR_BOTTOM)
+#endif // defined(SRC_BATCH)
}
\ No newline at end of file
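Editor's note: with the tensor no longer border-padded, the rewritten pooling_layer_2_nchw_indices can compute the argmax indices directly from element coordinates (offset_top = x + y * SRC_WIDTH + z * SRC_WIDTH * SRC_HEIGHT, wrapped per batch; offset_bottom = offset_top + SRC_WIDTH) instead of reverse-engineering them from padded strides as the removed offset_no_padding_nchw helper did. A hedged C++ stand-in for that selection logic, with made-up inputs:

// Illustrative only: mirrors the index selection of the rewritten kernel.
#include <cstdio>

struct MaxWithIndex
{
    float    value;
    unsigned index;
};

MaxWithIndex max_pool_2x2_with_index(const float *plane, int width, int height, int z,
                                     int x, int y, unsigned batch_elems)
{
    const unsigned offset_top    = (x + y * width + z * width * height) % batch_elems;
    const unsigned offset_bottom = offset_top + width; // next row, same column

    const float d00 = plane[y * width + x];
    const float d01 = plane[y * width + x + 1];
    const float d10 = plane[(y + 1) * width + x];
    const float d11 = plane[(y + 1) * width + x + 1];

    const float top_max    = (d00 >= d01) ? d00 : d01;
    const float bottom_max = (d10 >= d11) ? d10 : d11;

    const unsigned top_idx    = (d00 >= d01) ? offset_top : offset_top + 1;
    const unsigned bottom_idx = (d10 >= d11) ? offset_bottom : offset_bottom + 1;

    return (top_max >= bottom_max) ? MaxWithIndex{ top_max, top_idx }
                                   : MaxWithIndex{ bottom_max, bottom_idx };
}

int main()
{
    const float plane[] = { 1, 5,
                            3, 2 };
    const auto r = max_pool_2x2_with_index(plane, 2, 2, /*z*/ 0, /*x*/ 0, /*y*/ 0, /*batch_elems*/ 4);
    std::printf("max=%g index=%u\n", r.value, r.index); // max=5 index=1
    return 0;
}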
diff --git a/src/core/CL/cl_kernels/nchw/pooling_layer_quantized.cl b/src/core/CL/cl_kernels/nchw/pooling_layer_quantized.cl
deleted file mode 100644
index 1440ef3ed1..0000000000
--- a/src/core/CL/cl_kernels/nchw/pooling_layer_quantized.cl
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#if defined(DATA_TYPE) && defined(INITIAL_VALUE)
-#define VEC_TYPE(VEC_SIZE) VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-
-#if defined(POOL_AVG)
-#define POOL_OP(x, y) ((x) + (y))
-#else /* defined(POOL_AVG) */
-#define POOL_OP(x, y) (max((x), (y)))
-#endif /* defined(POOL_AVG) */
-
-#define DIV_OP(x, y) (x * (1.f / y))
-
-#if defined(POOL_L2)
-#error "L2 pooling is not supported"
-#endif /* defined(POOL_L2) */
-
-int calculate_avg_scale(const int pool_size_x, const int pool_size_y, const int upper_bound_w, const int upper_bound_h,
- const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x = get_global_id(0) * stride_x - pad_x;
- int start_y = get_global_id(1) * stride_y - pad_y;
- const int end_x = min(start_x + pool_size_x, upper_bound_w);
- const int end_y = min(start_y + pool_size_y, upper_bound_h);
-#if defined(EXCLUDE_PADDING)
- start_x = max(0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- return ((end_y - start_y) * (end_x - start_x));
-}
-
-/** Performs a pooling function of pool size equal to N (NCHW)
- *
- * @note Pool sizes must be passed using -DPOOL_SIZE_X and -DPOOL_SIZE_Y e.g. -DPOOL_SIZE_X=13;
- * @note In case of average pooling the following information must be passed at compile time:
- * -DPOOL_AVG must be provided otherwise max pooling will be performed.
- * -DMAX_WIDTH and -DMAX_HEIGHT which are the maximum accessible indeces in x and y dimensions (width + pad)
- * -DSTRIDE_X and -DSTRIDE_Y which are the steps of the window along the x and y directions
- * -DPAD_X and -DPAD_Y which are the pooling paddings in x and y dimension
- * @note Input data type must be passed at compile time using -DDAT_TYPE=type, e.g. -DDATA_TYPE=uchar
- * @note The initial value for the pooling operation must be passed at compile time using -DINITIAL_VALUE e.g. -DINITIAL_VALUE=0
- *
- * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
- */
-__kernel void pooling_layer_MxN_quantized_nchw(
- TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(output))
-{
- // Get pixels pointer
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
-
- int8 vdata = INITIAL_VALUE;
- int sdata = INITIAL_VALUE;
-
- // Load data
- for(int y = 0; y < POOL_SIZE_Y; y++)
- {
- int x = 0;
- for(; x <= ((int)POOL_SIZE_X - 8); x += 8)
- {
- VEC_TYPE(8)
- data = vload8(0, (__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0));
- int8 data0 = convert_int8(data);
- vdata = POOL_OP(vdata, data0);
- }
-
- // Leftover
- for(; x < (int)POOL_SIZE_X; ++x)
- {
- DATA_TYPE data = *((__global DATA_TYPE *)tensor3D_offset(&input, x, y, 0));
- int data0 = convert_int(data);
- sdata = POOL_OP(sdata, data0);
- }
- }
-
- // Reduce result
- int4 reduce4 = POOL_OP(vdata.s0123, vdata.s4567);
- int2 reduce2 = POOL_OP(reduce4.s01, reduce4.s23);
- int res = POOL_OP(reduce2.s0, reduce2.s1);
- res = POOL_OP(res, sdata);
-
-#if defined(POOL_AVG)
- res = round(DIV_OP(res, calculate_avg_scale(POOL_SIZE_X, POOL_SIZE_Y, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y)));
-#endif /* defined(POOL_AVG) */
-
- DATA_TYPE result_q8 = CONVERT(res, DATA_TYPE);
-
-#if defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT)
-
- const float result_f32 = convert_float(result_q8);
- const float input_offset = (float)OFFSET_IN1;
- const float input_scale = (float)SCALE_IN1;
- const float scale_out = (float)SCALE_OUT;
- const float offset_out = (float)OFFSET_OUT;
- const float in_f32 = (result_f32 - input_offset) * input_scale;
- const float out_f32 = in_f32 / scale_out + offset_out;
- result_q8 = CONVERT_SAT(convert_int_rte(out_f32), DATA_TYPE);
-
-#endif /* defined(OFFSET_IN1) && defined(OFFSET_OUT) && defined(SCALE_IN1) && defined(SCALE_OUT) */
-
- *(__global DATA_TYPE *)output.ptr = result_q8;
-}
-#endif // defined(DATA_TYPE) && defined(INITIAL_VALUE)
\ No newline at end of file
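Editor's note: the requantization tail of the quantized kernel deleted above is kept, essentially verbatim, inside the merged NCHW kernel under #if defined(QUANTIZED): the pooled value is dequantized with the input offset/scale, requantized with the output offset/scale, rounded to nearest, and saturated to the 8-bit type. A minimal sketch of that math (illustrative C++; parameter values are made up):

// Hedged sketch of the OFFSET_IN1/SCALE_IN1 -> OFFSET_OUT/SCALE_OUT requantization.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

uint8_t requantize_qasymm8(int pooled, float offset_in, float scale_in,
                           float offset_out, float scale_out)
{
    const float real    = (static_cast<float>(pooled) - offset_in) * scale_in; // dequantize
    const float q       = real / scale_out + offset_out;                       // requantize
    const int   rounded = static_cast<int>(std::lrint(q));                     // round to nearest
    return static_cast<uint8_t>(std::clamp(rounded, 0, 255));                  // saturate to QASYMM8
}

int main()
{
    // e.g. input scale 0.5, zero point 10; output scale 0.25, zero point 0
    std::printf("%u\n", static_cast<unsigned>(requantize_qasymm8(30, 10.f, 0.5f, 0.f, 0.25f))); // prints 40
    return 0;
}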
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index 5cd969e7f2..c05bb96753 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -328,10 +328,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "pixelwise_mul_float", "common/pixelwise_mul_float.cl" },
{ "pixelwise_mul_int", "common/pixelwise_mul_int.cl" },
{ "pixelwise_mul_quantized", "common/pixelwise_mul_int.cl" },
- { "pooling_layer_2", "common/pooling_layer.cl" },
- { "pooling_layer_3", "common/pooling_layer.cl" },
- { "pooling_layer_optimized_3", "common/pooling_layer.cl" },
- { "pooling_layer_7", "common/pooling_layer.cl" },
{ "qlstm_layer_normalization", "common/qlstm_layer_normalization.cl" },
{ "quantization_layer", "common/quantization_layer.cl" },
{ "range", "common/range.cl" },
@@ -385,9 +381,7 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{ "normalize_planar_yuv_layer_nchw", "nchw/normalize_planar_yuv_layer.cl" },
{ "normalize_planar_yuv_layer_q8_nchw", "nchw/normalize_planar_yuv_layer_quantized.cl" },
{ "pooling_layer_MxN_nchw", "nchw/pooling_layer.cl" },
- { "pooling_layer_2_nchw_indices_fp32", "nchw/pooling_layer.cl" },
- { "pooling_layer_2_nchw_indices_fp16", "nchw/pooling_layer.cl" },
- { "pooling_layer_MxN_quantized_nchw", "nchw/pooling_layer_quantized.cl" },
+ { "pooling_layer_2_nchw_indices", "nchw/pooling_layer.cl" },
{ "prior_box_layer_nchw", "nchw/prior_box_layer.cl" },
{ "remap_nearest_neighbour_nchw", "nchw/remap.cl" },
{ "remap_bilinear_nchw", "nchw/remap.cl" },
@@ -668,10 +662,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/common/pixelwise_mul_int.clembed"
},
{
- "common/pooling_layer.cl",
-#include "./cl_kernels/common/pooling_layer.clembed"
- },
- {
"common/qlstm_layer_normalization.cl",
#include "./cl_kernels/common/qlstm_layer_normalization.clembed"
},
@@ -805,10 +795,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
#include "./cl_kernels/nchw/pooling_layer.clembed"
},
{
- "nchw/pooling_layer_quantized.cl",
-#include "./cl_kernels/nchw/pooling_layer_quantized.clembed"
- },
- {
"nchw/prior_box_layer.cl",
#include "./cl_kernels/nchw/prior_box_layer.clembed"
},
diff --git a/src/gpu/cl/kernels/ClPool2dKernel.cpp b/src/gpu/cl/kernels/ClPool2dKernel.cpp
index 04f2b142bd..5e53799f30 100644
--- a/src/gpu/cl/kernels/ClPool2dKernel.cpp
+++ b/src/gpu/cl/kernels/ClPool2dKernel.cpp
@@ -23,18 +23,13 @@
*/
#include "src/gpu/cl/kernels/ClPool2dKernel.h"
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
-#include "support/StringSupport.h"
namespace arm_compute
{
@@ -46,19 +41,6 @@ using namespace arm_compute::misc::shape_calculator;
namespace
{
-// Internal window config info
-using ClPoolingConfig = std::pair<unsigned int, BorderSize>; //num_elems_processed_per_iteration, border_size
-
-void auto_init(const ITensorInfo *src, ITensorInfo *dst, ITensorInfo *indices, PoolingLayerInfo pool_info)
-{
- TensorShape out_shape = compute_pool_shape(*src, pool_info);
- auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape));
- if(indices)
- {
- auto_init_if_empty(*indices, src->clone()->set_tensor_shape(out_shape).set_data_type(DataType::U32));
- }
-}
-
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
@@ -104,102 +86,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
return Status{};
}
-
-std::tuple<Status, Window, ClPoolingConfig> validate_and_configure_window(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr)
-{
- ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
-
- // Get data layout
- const DataLayout data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
- int pool_stride_x = 0;
- int pool_stride_y = 0;
- unsigned int pooled_w = 0;
- unsigned int pooled_h = 0;
- int pool_size_x = pool_info.is_global_pooling ? src->dimension(idx_width) : pool_info.pool_size.width;
- int pool_size_y = pool_info.is_global_pooling ? src->dimension(idx_height) : pool_info.pool_size.height;
- const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
- std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_pad_right = pad_stride_info.pad_right();
- const int pool_pad_top = pad_stride_info.pad_top();
- const int pool_pad_left = pad_stride_info.pad_left();
- const int pool_pad_bottom = pad_stride_info.pad_bottom();
- BorderSize border_size = BorderSize();
-
- auto_init(src, dst, indices, pool_info);
- pooled_w = dst->tensor_shape()[idx_width];
- pooled_h = dst->tensor_shape()[idx_height];
-
- const DataType data_type = src->data_type();
-
- const int src_width = src->dimension(idx_width);
- const int src_height = src->dimension(idx_height);
-
- unsigned int num_elems_processed_per_iteration = 0;
- bool window_changed = false;
- Window win{};
- switch(data_layout)
- {
- case DataLayout::NCHW:
- {
- // Initialize border size
- border_size = BorderSize(pool_pad_top, pool_pad_right, pool_pad_bottom, pool_pad_left);
- // Change the number of elements processed per iteration
- // for pooling 3x3 with stride less equal than 3
- const bool can_optimize = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3) && !is_data_type_quantized(data_type);
- num_elems_processed_per_iteration = can_optimize ? 4 : 1;
- const unsigned int num_elems_read_per_iteration = (num_elems_processed_per_iteration - 1) * pool_stride_x + pool_size_x;
-
- // Number of iterations in X dimension
- const int num_iterations_x = (pooled_w + num_elems_processed_per_iteration - 1) / num_elems_processed_per_iteration;
-
- // Upper limit for the number of right/bottom border elements that are accessed
- const int upper_bound_w = ((num_iterations_x - 1) * num_elems_processed_per_iteration * pool_stride_x - pool_pad_left + num_elems_read_per_iteration) - src_width;
- const int upper_bound_h = ((pooled_h - 1) * pool_stride_y - pool_pad_top + pool_size_y) - src_height;
-
- border_size.right = std::max(upper_bound_w, pool_pad_right);
- border_size.bottom = std::max(upper_bound_h, pool_pad_bottom);
-
- win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
-
- AccessWindowRectangle src_access(src, -pool_pad_left, -pool_pad_top, num_elems_read_per_iteration, pool_size_y,
- pool_stride_x, pool_stride_y);
- AccessWindowHorizontal dst_access(dst, 0, num_elems_processed_per_iteration);
-
- // Update indices window
- if(indices)
- {
- AccessWindowHorizontal indices_access(indices, 0, num_elems_processed_per_iteration);
- window_changed = update_window_and_padding(win, src_access, dst_access, indices_access);
- indices_access.set_valid_region(win, ValidRegion(Coordinates(), indices->tensor_shape()));
- }
- else
- {
- window_changed = update_window_and_padding(win, src_access, dst_access);
- }
-
- dst_access.set_valid_region(win, ValidRegion(Coordinates(), dst->tensor_shape()));
- break;
- }
- case DataLayout::NHWC:
- {
- const size_t vec_size = dst->data_type() == DataType::F32 ? 2 : 4;
-
- // Initialize border size
- border_size = BorderSize();
- num_elems_processed_per_iteration = adjust_vec_size(vec_size, dst->dimension(0));
- win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration));
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not implemented");
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_tuple(err, win, ClPoolingConfig(num_elems_processed_per_iteration, border_size));
-}
} // namespace
ClPool2dKernel::ClPool2dKernel()
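
The removed validate_and_configure_window above sized a right/bottom border so the old 3x3-optimized NCHW path could safely read past the input edge; the new configure (further down in this file) derives the window from the destination shape alone and needs no such border. A worked sketch of the old arithmetic with illustrative sizes (7x7 input, 3x3 pool, stride 1, no explicit padding; the numbers are not taken from the patch):

#include <algorithm>
#include <cstdio>

int main()
{
    // Illustration of the removed NCHW window logic.
    const int src_w = 7, pool_size = 3, stride = 1, pad_left = 0, pad_right = 0;
    const int pooled_w   = (src_w - pool_size) / stride + 1;                 // 5 output columns
    const int elems_out  = 4;                                                // old "optimized" path: 4 results per thread
    const int elems_read = (elems_out - 1) * stride + pool_size;             // 6 input values read per thread
    const int iters_x    = (pooled_w + elems_out - 1) / elems_out;           // 2 thread steps along x
    const int overrun    = (iters_x - 1) * elems_out * stride - pad_left + elems_read - src_w; // 3 columns past the edge
    const int border_r   = std::max(overrun, pad_right);                     // right border the kernel relied on
    std::printf("old path needed %d extra right-border columns; the padding-free kernel needs 0\n", border_r);
    return 0;
}
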
@@ -207,20 +93,27 @@ ClPool2dKernel::ClPool2dKernel()
_type = CLKernelType::POOL;
}
-BorderSize ClPool2dKernel::border_size() const
-{
- return _border_size;
-}
-
void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices));
auto padding_info = get_padding_info({ src, dst, indices });
+ // Auto init if empty
+ TensorShape out_shape = compute_pool_shape(*src, pool_info);
+ auto_init_if_empty(*dst, src->clone()->set_tensor_shape(out_shape));
+ if(indices)
+ {
+ auto_init_if_empty(*indices, src->clone()->set_tensor_shape(out_shape).set_data_type(DataType::U32));
+ }
+
// Set instance variables
- _pool_info = pool_info;
- _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
+ _pool_info = pool_info;
+ _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : pool_info.data_layout;
+ _num_elems_processed_per_iteration = (_data_layout == DataLayout::NCHW) ? 1 : ((dst->data_type() == DataType::F32) ? 2 : 4);
+ _num_elems_processed_per_iteration = adjust_vec_size(_num_elems_processed_per_iteration, dst->dimension(0));
+
int pool_stride_x = 0;
int pool_stride_y = 0;
const PoolingType pool_type = pool_info.pool_type;
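
The replacement logic above fixes the vector width up front: one element per iteration for NCHW, 2 (F32) or 4 (other types) for NHWC, then adjusted with adjust_vec_size against the output's first dimension. A condensed sketch, assuming adjust_vec_size behaves like a simple clamp (the real helper may round down differently):

#include <algorithm>

// Standalone stand-ins for the library's enums, for illustration only.
enum class Layout { NCHW, NHWC };
enum class Type   { F16, F32, QASYMM8 };

unsigned int vec_size_for(Layout layout, Type dst_type, unsigned int dst_width)
{
    const unsigned int vec = (layout == Layout::NCHW) ? 1u
                                                      : ((dst_type == Type::F32) ? 2u : 4u);
    return std::min(vec, dst_width); // never process more elements per iteration than the output has
}
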
@@ -233,61 +126,47 @@ void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorI
const PadStrideInfo pad_stride_info = pool_info.pad_stride_info;
const bool exclude_padding = pool_info.exclude_padding;
std::tie(pool_stride_x, pool_stride_y) = pad_stride_info.stride();
- const int pool_pad_top = pad_stride_info.pad_top();
- const int pool_pad_left = pad_stride_info.pad_left();
+ const int pool_pad_top = pad_stride_info.pad_top();
+ const int pool_pad_left = pad_stride_info.pad_left();
+ const DataType data_type = src->data_type();
// Set build options
CLBuildOptions build_opts;
- const DataType data_type = src->data_type();
-
- // Configure kernel window
- auto win_config = validate_and_configure_window(src, dst, pool_info, indices);
-
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
- ICLKernel::configure_internal(std::get<1>(win_config));
-
- ClPoolingConfig pooling_config = std::get<2>(win_config);
- _num_elems_processed_per_iteration = pooling_config.first;
- _border_size = pooling_config.second;
-
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(_num_elems_processed_per_iteration));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+ build_opts.add_option("-DPOOL_" + string_from_pooling_type(pool_type));
+ build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(pool_stride_x));
+ build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
+ build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
+ build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
+ build_opts.add_option("-DPOOL_SIZE_X=" + support::cpp11::to_string(pool_size_x));
+ build_opts.add_option("-DPOOL_SIZE_Y=" + support::cpp11::to_string(pool_size_y));
+ build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width)));
+ build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height)));
+ build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left)));
+ build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top)));
// Tensor paddings are used to calculate the indices for MAX pooling
if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
{
- build_opts.add_option("-DPAD_TENSOR_LEFT=" + support::cpp11::to_string(src->padding().left));
- build_opts.add_option("-DPAD_TENSOR_RIGHT=" + support::cpp11::to_string(src->padding().right));
- build_opts.add_option("-DPAD_TENSOR_TOP=" + support::cpp11::to_string(src->padding().top));
- build_opts.add_option("-DPAD_TENSOR_BOTTOM=" + support::cpp11::to_string(src->padding().bottom));
- build_opts.add_option("-DTENSOR_CHANNEL=" + support::cpp11::to_string(src->dimension(idx_channel)));
- build_opts.add_option("-DTENSOR_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width)));
- build_opts.add_option("-DTENSOR_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height)));
+ build_opts.add_option("-DSRC_BATCH=" + support::cpp11::to_string(src->tensor_shape().total_size_lower(3)));
}
- if(is_data_type_quantized_asymmetric(data_type) && src->quantization_info() != dst->quantization_info())
+ if(is_data_type_quantized_asymmetric(data_type))
{
- const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
- const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
-
- build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
- build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
- build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
- build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
- }
-
- // Check dst dimensions
- auto_init(src, dst, indices, pool_info);
+ build_opts.add_option("-DQUANTIZED");
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, pool_info, indices));
+ if(src->quantization_info() != dst->quantization_info())
+ {
+ const UniformQuantizationInfo iq_info = src->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = dst->quantization_info().uniform();
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- build_opts.add_option("-DPOOL_" + string_from_pooling_type(pool_type));
- build_opts.add_option("-DSTRIDE_X=" + support::cpp11::to_string(pool_stride_x));
- build_opts.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(pool_stride_y));
- build_opts.add_option("-DPAD_X=" + support::cpp11::to_string(pool_pad_left));
- build_opts.add_option("-DPAD_Y=" + support::cpp11::to_string(pool_pad_top));
- build_opts.add_option("-DPOOL_SIZE_X=" + support::cpp11::to_string(pool_size_x));
- build_opts.add_option("-DPOOL_SIZE_Y=" + support::cpp11::to_string(pool_size_y));
+ build_opts.add_option("-DOFFSET_IN1=" + float_to_string_with_full_precision(iq_info.offset));
+ build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset));
+ build_opts.add_option("-DSCALE_IN1=" + float_to_string_with_full_precision(iq_info.scale));
+ build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale));
+ }
+ }
// Set the initial value for the pooling operation according to the data type
if(pool_type == PoolingType::MAX)
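
When the input and output quantization infos differ, the kernel now receives OFFSET_IN1/SCALE_IN1 and OFFSET_OUT/SCALE_OUT (see the hunk above) and has to rescale its result. A hedged sketch of that rescaling for an average pool, assuming the usual asymmetric model real = scale * (q - offset); this is illustrative arithmetic, not the kernel source:

#include <algorithm>
#include <cmath>
#include <cstdint>

uint8_t requantize_avg(int32_t acc, int pool_area,
                       float scale_in, int offset_in,
                       float scale_out, int offset_out)
{
    // Dequantize the averaged accumulator, then quantize into the output's scale/offset.
    const float real = scale_in * (static_cast<float>(acc) / pool_area - offset_in);
    const int   q    = static_cast<int>(std::round(real / scale_out)) + offset_out;
    return static_cast<uint8_t>(std::min(255, std::max(0, q))); // clamp to the QASYMM8 range
}
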
@@ -309,9 +188,6 @@ void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorI
build_opts.add_option("-DINITIAL_VALUE=0");
}
- build_opts.add_option("-DMAX_WIDTH=" + support::cpp11::to_string(src->dimension(idx_width) + (exclude_padding ? 0 : pool_pad_left)));
- build_opts.add_option("-DMAX_HEIGHT=" + support::cpp11::to_string(src->dimension(idx_height) + (exclude_padding ? 0 : pool_pad_top)));
-
// Create kernel
switch(_data_layout)
{
@@ -319,7 +195,7 @@ void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorI
{
const auto use_fp_mixed_precision = (data_type == DataType::F16) && pool_info.fp_mixed_precision;
const auto use_wider_accumulator = use_fp_mixed_precision && (pool_type != PoolingType::MAX);
- const auto acc_data_type = get_cl_type_from_data_type(use_wider_accumulator ? DataType::F32 : data_type);
+ const auto acc_data_type = get_cl_type_from_data_type(use_wider_accumulator ? DataType::F32 : (is_data_type_quantized(data_type) ? DataType::S32 : data_type));
build_opts.add_option("-DACC_DATA_TYPE=" + acc_data_type);
build_opts.add_option_if(use_wider_accumulator, "-DFP_MIXED_PRECISION");
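
The accumulator type now also covers the merged quantized path: F32 when FP16 mixed precision widens the accumulator, S32 for quantized inputs, otherwise the input type itself. A small restatement of that choice (the helper name is illustrative):

#include "arm_compute/core/Utils.h"

arm_compute::DataType pooling_acc_type(arm_compute::DataType dt, bool use_wider_accumulator)
{
    using arm_compute::DataType;
    if(use_wider_accumulator)
    {
        return DataType::F32; // F16 mixed-precision AVG/L2 pooling accumulates in F32
    }
    if(arm_compute::is_data_type_quantized(dt))
    {
        return DataType::S32; // quantized sums accumulate in 32-bit integers
    }
    return dt;
}
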
@@ -328,33 +204,15 @@ void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorI
build_opts.add_option_if(exclude_padding, "-DEXCLUDE_PADDING");
}
- if((pool_size_x == 3) && (pool_size_y == 3) && !is_data_type_quantized_asymmetric(data_type))
- {
- // Check if we have pool3x3 with stride_x less equal than 3. In these cases, run an optimized OpenCL kernel where
- // each thread computes 4 dst elements
- const bool is_pool3x3_stride_le3 = (pool_size_x == 3) && (pool_size_y == 3) && (pool_stride_x <= 3);
-
- std::string kernel_name = ((is_pool3x3_stride_le3) ? "pooling_layer_optimized_" : "pooling_layer_")
- + support::cpp11::to_string(pool_size_x);
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- else if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
+ if(pool_info.pool_size == Size2D(2, 2) && pool_type == PoolingType::MAX && indices && is_data_type_float(data_type))
{
// For max pooling with pool2x2, store indices which will be used in max unpooling
- if(data_type == DataType::F32)
- {
- std::string kernel_name = "pooling_layer_2_nchw_indices_fp32";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
- else if(data_type == DataType::F16)
- {
- std::string kernel_name = "pooling_layer_2_nchw_indices_fp16";
- _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
- }
+ std::string kernel_name = "pooling_layer_2_nchw_indices";
+ _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
else // Run general case
{
- std::string kernel_name = is_data_type_quantized_asymmetric(data_type) ? "pooling_layer_MxN_quantized_nchw" : "pooling_layer_MxN_nchw";
+ std::string kernel_name = "pooling_layer_MxN_nchw";
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
}
break;
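
After this cleanup only two NCHW kernels remain: the 2x2 max-pooling variant that also writes indices, and the generic MxN kernel for everything else, float and quantized alike. A condensed sketch of the selection, using the kernel names from the patch:

#include <string>

std::string nchw_kernel_name(int pool_w, int pool_h, bool is_max, bool has_indices, bool is_float)
{
    if(pool_w == 2 && pool_h == 2 && is_max && has_indices && is_float)
    {
        return "pooling_layer_2_nchw_indices"; // also stores argmax indices for max-unpooling
    }
    return "pooling_layer_MxN_nchw"; // general case, fp and quantized merged
}
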
@@ -405,6 +263,10 @@ void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorI
ARM_COMPUTE_ERROR("Not implemented");
}
+ // Configure kernel window
+ Window win = calculate_max_window(*dst, Steps(_num_elems_processed_per_iteration));
+ ICLKernel::configure_internal(win);
+
// Set config_id for enabling LWS tuning
_config_id = "pooling_layer_";
_config_id += lower_string(string_from_data_type(data_type));
@@ -419,14 +281,12 @@ void ClPool2dKernel::configure(const ClCompileContext &compile_context, ITensorI
_config_id += "_";
_config_id += lower_string(string_from_data_layout(src->data_layout()));
- ARM_COMPUTE_ERROR_ON(src->data_layout() == DataLayout::NHWC && has_padding_changed(padding_info));
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status ClPool2dKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, pool_info, indices));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(src->clone().get(), dst->clone().get(), pool_info)));
-
return Status{};
}
@@ -453,18 +313,9 @@ void ClPool2dKernel::run_op(ITensorPack &tensors, const Window &window, cl::Comm
Window slice = window_collapsed.first_slice_window_3D();
do
{
- // Upsample src by pool size
- Window in_slice(slice);
- in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start() - _pool_info.pad_stride_info.pad_left(),
- (in_slice.x().end() - _pool_info.pad_stride_info.pad_left()) * pool_stride_x,
- pool_stride_x * _num_elems_processed_per_iteration));
- in_slice.set(Window::DimY, Window::Dimension(in_slice.y().start() - _pool_info.pad_stride_info.pad_top(),
- (in_slice.y().end() - _pool_info.pad_stride_info.pad_top()) * pool_stride_y,
- pool_stride_y));
-
// Set srcs
unsigned int idx = 0;
- add_3D_tensor_argument(idx, src, in_slice);
+ add_3D_tensor_argument(idx, src, slice);
add_3D_tensor_argument(idx, dst, slice);
if(indices && is_data_type_float(src->info()->data_type()) && (_pool_info.pool_size == Size2D(2, 2)))
{
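
run_op no longer builds a shifted, upsampled input window: the same 3D slice is bound for both source and destination, and the kernel derives its input coordinates from the STRIDE_X/Y and PAD_X/Y defines. A hedged sketch of that coordinate mapping, as padding-free pooling kernels typically do it (plain arithmetic, not the OpenCL source):

#include <algorithm>

struct Region
{
    int x_start, x_end, y_start, y_end;
};

Region pool_region(int x_out, int y_out, int stride_x, int stride_y, int pad_x, int pad_y,
                   int pool_w, int pool_h, int src_w, int src_h)
{
    Region r{};
    r.x_start = std::max(0, x_out * stride_x - pad_x);
    r.y_start = std::max(0, y_out * stride_y - pad_y);
    r.x_end   = std::min(src_w, x_out * stride_x - pad_x + pool_w);
    r.y_end   = std::min(src_h, y_out * stride_y - pad_y + pool_h);
    return r; // elements outside the region are never read, so no border fill is required
}
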
diff --git a/src/gpu/cl/kernels/ClPool2dKernel.h b/src/gpu/cl/kernels/ClPool2dKernel.h
index 61d204dc68..f5bb0687e8 100644
--- a/src/gpu/cl/kernels/ClPool2dKernel.h
+++ b/src/gpu/cl/kernels/ClPool2dKernel.h
@@ -61,12 +61,10 @@ public:
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
- BorderSize border_size() const override;
public:
PoolingLayerInfo _pool_info{};
DataLayout _data_layout{ DataLayout::UNKNOWN };
- BorderSize _border_size{ 0 };
unsigned int _num_elems_processed_per_iteration{ 1 };
};
} // namespace kernels
diff --git a/src/gpu/cl/operators/ClPool2d.cpp b/src/gpu/cl/operators/ClPool2d.cpp
index fdadd199fc..a5b18a2340 100644
--- a/src/gpu/cl/operators/ClPool2d.cpp
+++ b/src/gpu/cl/operators/ClPool2d.cpp
@@ -25,7 +25,6 @@
#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/gpu/cl/ClCompileContext.h"
#include "src/gpu/cl/kernels/ClPool2dKernel.h"
@@ -40,62 +39,15 @@ void ClPool2d::configure(const ClCompileContext &compile_context, ITensorInfo *s
auto k = std::make_unique<kernels::ClPool2dKernel>();
k->set_target(CLScheduler::get().target());
k->configure(compile_context, src, dst, info, indices);
- _pooling = std::move(k);
-
- const DataType data_type = src->data_type();
-
- // Configure border depending on operation required (quantize border in case of asymmetric data_type)
- BorderMode border_mode{};
- PixelValue pixel_value(0.f);
- if(is_data_type_quantized_asymmetric(data_type) && !info.exclude_padding)
- {
- pixel_value = PixelValue(0, data_type, src->quantization_info());
- }
-
- // Data layout
- const auto data_layout = info.data_layout == DataLayout::UNKNOWN ? src->data_layout() : info.data_layout;
-
- switch(data_layout)
- {
- case DataLayout::NCHW:
- border_mode = (PoolingType::MAX == info.pool_type) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
- break;
- case DataLayout::NHWC:
- border_mode = BorderMode::CONSTANT;
- if(PoolingType::MAX == info.pool_type)
- {
- if(is_data_type_quantized(data_type))
- {
- std::tie(pixel_value, std::ignore) = get_min_max(data_type);
- }
- else
- {
- pixel_value = PixelValue(std::numeric_limits<float>::lowest());
- }
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Data layout not supported");
- }
- auto b = std::make_unique<CLFillBorderKernel>();
- b->configure(compile_context, src, _pooling->border_size(), border_mode, pixel_value);
- _border_handler = std::move(b);
+ _kernel = std::move(k);
// Tune kernels
- CLScheduler::get().tune_kernel_static(*_pooling);
+ CLScheduler::get().tune_kernel_static(*_kernel);
}
Status ClPool2d::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices)
{
return kernels::ClPool2dKernel::validate(src, dst, info, indices);
}
-
-void ClPool2d::run(ITensorPack &tensors)
-{
- ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
-
- CLScheduler::get().enqueue_op(*_border_handler.get(), tensors, false);
- CLScheduler::get().enqueue_op(*_pooling.get(), tensors, false);
-}
} // namespace opencl
} // namespace arm_compute
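
With the border handler gone, ClPool2d no longer overrides run(): the pooling kernel is stored in the base class' _kernel member and the inherited run() is relied on to enqueue it. A sketch of the behaviour being inherited, under that assumption (not the actual base implementation):

#include "arm_compute/core/Error.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/ICLKernel.h"

// Assumption: the base operator simply enqueues the single stored kernel.
void run_single_kernel(arm_compute::ICLKernel &kernel, arm_compute::ITensorPack &tensors)
{
    ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");
    arm_compute::CLScheduler::get().enqueue_op(kernel, tensors, /* flush */ true);
}
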
diff --git a/src/gpu/cl/operators/ClPool2d.h b/src/gpu/cl/operators/ClPool2d.h
index a041053bb3..f353ba262e 100644
--- a/src/gpu/cl/operators/ClPool2d.h
+++ b/src/gpu/cl/operators/ClPool2d.h
@@ -35,7 +35,6 @@ namespace opencl
{
/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following OpenCL kernels:
*
- * -# @ref CLFillBorderKernel (executed if padding size is different from zero)
* -# @ref opencl::ClPool2d
*/
class ClPool2d : public IClOperator
@@ -59,13 +58,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info, const ITensorInfo *indices = nullptr);
-
- // Inherited method overridden
- void run(ITensorPack &tensors) override;
-
-private:
- std::unique_ptr<ICLKernel> _pooling{ nullptr };
- std::unique_ptr<ICLKernel> _border_handler{ nullptr };
};
} // namespace opencl
} // namespace arm_compute
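
For reference, a caller-side sketch of the simplified operator, assuming the usual TensorInfo/PoolingLayerInfo constructors from this era of the library (constructor signatures may differ between versions; shapes are illustrative):

#include "arm_compute/core/CL/CLCompileContext.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "src/gpu/cl/operators/ClPool2d.h"

void pool2d_example(const arm_compute::CLCompileContext &ctx)
{
    using namespace arm_compute;

    // 32x32x16 F32 input; dst is left empty so configure() can auto-initialise it.
    TensorInfo src(TensorShape(32U, 32U, 16U), 1, DataType::F32);
    TensorInfo dst;

    const PoolingLayerInfo info(PoolingType::MAX, Size2D(3, 3), DataLayout::NCHW,
                                PadStrideInfo(1, 1, 0, 0));

    if(opencl::ClPool2d::validate(&src, &dst, info).error_code() == ErrorCode::OK)
    {
        opencl::ClPool2d pool;
        pool.configure(ctx, &src, &dst, info); // single kernel now, no CLFillBorderKernel
    }
}
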