aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGian Marco Iodice <gianmarco.iodice@arm.com>2021-01-02 09:58:51 +0000
committerGeorgios Pinitas <georgios.pinitas@arm.com>2021-01-19 13:43:52 +0000
commitff1fe3e32e25069fed750cdfe3046b7d8d5a2628 (patch)
tree9c01379de63f6ab218c7890dc91b10ac8faac157
parent6124390be4690ba06c404d56449f7e5d390cef53 (diff)
downloadComputeLibrary-ff1fe3e32e25069fed750cdfe3046b7d8d5a2628.tar.gz
Remove padding from direct convolution - OpenCL
- Refactor direct convolution for NHWC - Remove old kernels for NHWC - Change the heuristic in CLConvolutionLayer.cpp. The new direct convolution implementation is faster than FFT Resolves COMPMID-3908 Change-Id: Iee15ce7b04e21847b6eaae5c6d3c1b18180e7efc Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4876 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--src/core/CL/CLKernelLibrary.cpp11
-rw-r--r--src/core/CL/cl_kernels/direct_convolution.cl602
-rw-r--r--src/core/CL/cl_kernels/direct_convolution1x1.cl118
-rw-r--r--src/core/CL/cl_kernels/direct_convolution3x3.cl181
-rw-r--r--src/core/CL/cl_kernels/direct_convolution5x5.cl238
-rw-r--r--src/core/CL/cl_kernels/direct_convolution9x9.cl364
-rw-r--r--src/core/CL/cl_kernels/direct_convolution_quantized.cl718
-rw-r--r--src/core/CL/cl_kernels/gemm_helpers.h197
-rw-r--r--src/core/CL/cl_kernels/gemmlowp.cl3
-rw-r--r--src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp467
-rw-r--r--src/runtime/CL/functions/CLConvolutionLayer.cpp45
11 files changed, 1114 insertions, 1830 deletions
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index dadb3f4db1..3e5b70a142 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -137,17 +137,14 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "dequantization_layer_per_channel_nchw", "dequantization_layer.cl" },
{ "derivative", "derivative.cl" },
{ "dilate", "dilate.cl" },
+ { "direct_convolution_nhwc", "direct_convolution.cl" },
{ "direct_convolution1x1", "direct_convolution1x1.cl" },
- { "direct_convolution1x1_nhwc", "direct_convolution1x1.cl" },
{ "direct_convolution1x1_f32_bifrost", "direct_convolution1x1.cl" },
{ "direct_convolution3x3", "direct_convolution3x3.cl" },
- { "direct_convolution3x3_nhwc", "direct_convolution3x3.cl" },
{ "direct_convolution3x3_f32_bifrost", "direct_convolution3x3.cl" },
{ "direct_convolution5x5", "direct_convolution5x5.cl" },
- { "direct_convolution5x5_nhwc", "direct_convolution5x5.cl" },
{ "direct_convolution5x5_f32_bifrost", "direct_convolution5x5.cl" },
{ "direct_convolution_quantized", "direct_convolution_quantized.cl" },
- { "direct_convolution9x9_nhwc", "direct_convolution9x9.cl" },
{ "elementwise_operation_ADD", "elementwise_operation.cl" },
{ "elementwise_operation_SUB", "elementwise_operation.cl" },
{ "elementwise_operation_MAX", "elementwise_operation.cl" },
@@ -631,8 +628,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/direct_convolution_quantized.clembed"
},
{
- "direct_convolution9x9.cl",
-#include "./cl_kernels/direct_convolution9x9.clembed"
+ "direct_convolution.cl",
+#include "./cl_kernels/direct_convolution.clembed"
},
{
"elementwise_operation.cl",
diff --git a/src/core/CL/cl_kernels/direct_convolution.cl b/src/core/CL/cl_kernels/direct_convolution.cl
new file mode 100644
index 0000000000..3efb01b0b5
--- /dev/null
+++ b/src/core/CL/cl_kernels/direct_convolution.cl
@@ -0,0 +1,602 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "gemm_helpers.h"
+#include "helpers.h"
+#include "helpers_asymm.h"
+#include "repeat.h"
+
+#define CONCAT(a, b) a##b
+
+#if defined(IS_QUANTISED)
+
+#define ARM_OFFSET1(a, b, c) \
+ ({ \
+ c += (ACC_DATA_TYPE)a * (ACC_DATA_TYPE)b; \
+ })
+#define ARM_OFFSET2(a, b, c) \
+ ({ \
+ c += (ACC_DATA_TYPE)a.s0 * (ACC_DATA_TYPE)b; \
+ c += (ACC_DATA_TYPE)a.s1 * (ACC_DATA_TYPE)b; \
+ })
+#define ARM_OFFSET3(a, b, c) \
+ ({ \
+ ARM_OFFSET2(a, b, c); \
+ c += (ACC_DATA_TYPE)a.s2 * (ACC_DATA_TYPE)b; \
+ })
+#define ARM_OFFSET4(a, b, c) \
+ ({ \
+ ARM_OFFSET3(a, b, c); \
+ c += (ACC_DATA_TYPE)a.s3 * (ACC_DATA_TYPE)b; \
+ })
+#define ARM_OFFSET8(a, b, c) \
+ ({ \
+ ARM_OFFSET4((a.lo), (b), c); \
+ ARM_OFFSET4((a.hi), (b), c); \
+ })
+#define ARM_OFFSET16(a, b, c) \
+ ({ \
+ ARM_OFFSET8((a.lo), (b), c); \
+ ARM_OFFSET8((a.hi), (b), c); \
+ })
+
+#if N0 == 1
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({ \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##0), (a_offset), (c)); \
+ })
+#elif N0 == 2 // N0 == 2
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({ \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##0), (a_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##1), (a_offset), (c.s1)); \
+ })
+#elif N0 == 3 // N0 == 3
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({ \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##0), (a_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##1), (a_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##2), (a_offset), (c.s2)); \
+ })
+#elif N0 == 4 // N0 == 4
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({ \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##0), (a_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##1), (a_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##2), (a_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s3)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##3), (a_offset), (c.s3)); \
+ })
+#elif N0 == 8 // N0 == 8
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({ \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##0), (a_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##1), (a_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##2), (a_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s3)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##3), (a_offset), (c.s3)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s4)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##4), (a_offset), (c.s4)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s5)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##5), (a_offset), (c.s5)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s6)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##6), (a_offset), (c.s6)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s7)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##7), (a_offset), (c.s7)); \
+ })
+#elif N0 == 16 // N0 == 16
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({ \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##0), (a_offset), (c.s0)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##1), (a_offset), (c.s1)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##2), (a_offset), (c.s2)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s3)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##3), (a_offset), (c.s3)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s4)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##4), (a_offset), (c.s4)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s5)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##5), (a_offset), (c.s5)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s6)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##6), (a_offset), (c.s6)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s7)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##7), (a_offset), (c.s7)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s8)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##8), (a_offset), (c.s8)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.s9)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##9), (a_offset), (c.s9)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.sA)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##A), (a_offset), (c.sA)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.sB)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##B), (a_offset), (c.sB)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.sC)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##C), (a_offset), (c.sC)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.sD)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##D), (a_offset), (c.sD)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.sE)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##E), (a_offset), (c.sE)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((a), (b_offset), (c.sF)); \
+ CONCAT(ARM_OFFSET, k0) \
+ ((b##F), (a_offset), (c.sF)); \
+ })
+#else // N0 not supported
+#error "N0 value not supported"
+#endif // N0 conditions
+#else // defined(IS_QUANTISED)
+#define ARM_OFFSET_K0XN0(k0, a, b, a_offset, b_offset, c) \
+ ({})
+#endif // defined(IS_QUANTISED)
+
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && defined(IS_QUANTISED)
+#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+#define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val));
+#else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+#define ARM_DOT(x, y, val) val += arm_dot((x), (y));
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
+
+#define ARM_DOT1(a, b, c) \
+ ({ \
+ ARM_DOT((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (VEC_DATA_TYPE(SRC_DATA_TYPE, 3))0), (VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (VEC_DATA_TYPE(WEI_DATA_TYPE, 3))0), c); \
+ })
+#define ARM_DOT2(a, b, c) \
+ ({ \
+ ARM_DOT((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (VEC_DATA_TYPE(SRC_DATA_TYPE, 2))0), (VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (VEC_DATA_TYPE(WEI_DATA_TYPE, 2))0), c); \
+ })
+#define ARM_DOT3(a, b, c) \
+ ({ \
+ ARM_DOT((VEC_DATA_TYPE(SRC_DATA_TYPE, 4))(a, (SRC_DATA_TYPE)0), (VEC_DATA_TYPE(WEI_DATA_TYPE, 4))(b, (WEI_DATA_TYPE)0), c); \
+ })
+#define ARM_DOT4(a, b, c) \
+ ({ \
+ ARM_DOT(a, b, c); \
+ })
+#define ARM_DOT8(a, b, c) \
+ ({ \
+ ARM_DOT4((a.lo), (b.lo), c); \
+ ARM_DOT4((a.hi), (b.hi), c); \
+ })
+#define ARM_DOT16(a, b, c) \
+ ({ \
+ ARM_DOT8((a.lo), (b.lo), c); \
+ ARM_DOT8((a.hi), (b.hi), c); \
+ })
+
+#else // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && defined(IS_QUANTISED)
+
+#define ARM_DOT1(a, b, c) \
+ ({ \
+ c += (ACC_DATA_TYPE)a * (ACC_DATA_TYPE)b; \
+ })
+#define ARM_DOT2(a, b, c) \
+ ({ \
+ c += (ACC_DATA_TYPE)a.s0 * (ACC_DATA_TYPE)b.s0; \
+ c += (ACC_DATA_TYPE)a.s1 * (ACC_DATA_TYPE)b.s1; \
+ })
+#define ARM_DOT3(a, b, c) \
+ ({ \
+ ARM_DOT2(a, b, c); \
+ c += (ACC_DATA_TYPE)a.s2 * (ACC_DATA_TYPE)b.s2; \
+ })
+#define ARM_DOT4(a, b, c) \
+ ({ \
+ ARM_DOT3(a, b, c); \
+ c += (ACC_DATA_TYPE)a.s3 * (ACC_DATA_TYPE)b.s3; \
+ })
+#define ARM_DOT8(a, b, c) \
+ ({ \
+ ARM_DOT4((a.lo), (b.lo), c); \
+ ARM_DOT4((a.hi), (b.hi), c); \
+ })
+#define ARM_DOT16(a, b, c) \
+ ({ \
+ ARM_DOT8((a.lo), (b.lo), c); \
+ ARM_DOT8((a.hi), (b.hi), c); \
+ })
+#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && defined(IS_QUANTISED)
+
+#if N0 == 1
+#define ARM_DOT_K0XN0(k0, a, b, c) \
+ ({ \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##0), (c)); \
+ })
+#elif N0 == 2 // N0 == 2
+#define ARM_DOT_K0XN0(k0, a, b, c) \
+ ({ \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##0), (c.s0)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##1), (c.s1)); \
+ })
+#elif N0 == 3 // N0 == 3
+#define ARM_DOT_K0XN0(k0, a, b, c) \
+ ({ \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##0), (c.s0)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##1), (c.s1)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##2), (c.s2)); \
+ })
+#elif N0 == 4 // N0 == 4
+#define ARM_DOT_K0XN0(k0, a, b, c) \
+ ({ \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##0), (c.s0)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##1), (c.s1)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##2), (c.s2)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##3), (c.s3)); \
+ })
+#elif N0 == 8 // N0 == 8
+#define ARM_DOT_K0XN0(k0, a, b, c) \
+ ({ \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##0), (c.s0)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##1), (c.s1)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##2), (c.s2)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##3), (c.s3)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##4), (c.s4)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##5), (c.s5)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##6), (c.s6)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##7), (c.s7)); \
+ })
+#elif N0 == 16 // N0 == 16
+#define ARM_DOT_K0XN0(k0, a, b, c) \
+ ({ \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##0), (c.s0)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##1), (c.s1)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##2), (c.s2)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##3), (c.s3)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##4), (c.s4)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##5), (c.s5)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##6), (c.s6)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##7), (c.s7)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##8), (c.s8)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##9), (c.s9)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##A), (c.sA)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##B), (c.sB)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##C), (c.sC)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##D), (c.sD)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##E), (c.sE)); \
+ CONCAT(ARM_DOT, k0) \
+ ((a), (b##F), (c.sF)); \
+ })
+#else // N0 not supported
+#error "N0 value not supported"
+#endif // N0 conditions
+
+/** OpenCL kernel to compute the direct convolution.
+ *
+ * @note Data layout supported: NHWC
+ * @note Data type supported: F32/F16/QASYMM8
+ * @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=half)
+ * @note The accumulation data type must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=half)
+ * @note The convolution padding (left and top) must be passed at compile time using -DPAD_LEFT and -DPAD_TOP (e.g. -DPAD_LEFT=2, -DPAD_TOP=2)
+ * @note The convolution strides must be passed at compile time using -DSTRIDE_X and -DSTRIDE_Y (e.g. -DSTRIDE_X=2, -DSTRIDE_Y=2)
+ * @note The spatial dimensions of the weights must be passed at compile time using -DWEI_WIDTH and -DWEI_HEIGHT (e.g. -DWEI_WIDTH=9, -DWEI_HEIGHT=9)
+ * @note The spatial dimensions of the source tensor must be passed at compile time using -DSRC_WIDTH and -DSRC_HEIGHT (e.g. -DSRC_WIDTH=96, -DSRC_HEIGHT=64)
+ * @note The spatial dimensions of the destination tensor must be passed at compile time using -DDST_WIDTH and -DDST_HEIGHT (e.g. -DDST_WIDTH=96, -DDST_HEIGHT=64)
+ * @note The channels of the source tensor must be passed at compile time using -DSRC_CHANNELS (e.g. -DSRC_CHANNELS=64)
+ * @note The channels of the destination tensor must be passed at compile time using -DDST_CHANNELS (e.g. -DDST_CHANNELS=64)
+ * @note The data type of the source tensor must be passed at compile time using -DSRC_DATA_TYPE (e.g. -DSRC_DATA_TYPE=float)
+ * @note The data type of the weights tensor must be passed at compile time using -DWEI_DATA_TYPE (e.g. -DWEI_DATA_TYPE=float)
+ * @note The data type of the destination tensor must be passed at compile time using -DDST_DATA_TYPE (e.g. -DDST_DATA_TYPE=float)
+ * @note The data type of the accumulators must be passed at compile time using -DACC_DATA_TYPE (e.g. -DACC_DATA_TYPE=float)
+ * @note The number of M0 rows (width*height) to process must be passed at compile time using -DM0 (e.g. -DM0=2)
+ * @note The number of N0 output channels to process must be passed at compile time using -DN0 (e.g. -DN0=2)
+ * @note The number of K0 inner accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
+ * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
+ * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
+ * @note Only the following configurations of M0, N0 and K0 are currently supported:
+ * - M0 = 1
+ * - N0 = 2, 3, 4, 8, 16
+ * - K0 = 2, 3, 4, 8, 16
+ *
+ *@note In case of QASYMM8, the following extra information must be passed at compile time:
+ * - -DIS_QUANTISED
+ * - The destination quantization multiplier e.g. -DDST_MULTIPLIER=1234
+ * - The destination quantization shift e.g. -DDST_SHIFT=4
+ * - The destination offset e.g. -DDST_OFFSET=4
+ * - The source offset e.g. -DSRC_OFFSET=4
+ * - The weights offset e.g. -DWEI_OFFSET=4
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data type: F16/F32/QASYMM8
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data type: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] wei_ptr Pointer to the weights tensor. Supported data type: same as @p src_ptr
+ * @param[in] wei_stride_x Stride of the weights tensor in X dimension (in bytes)
+ * @param[in] wei_step_x wei_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] wei_stride_y Stride of the weights tensor in Y dimension (in bytes)
+ * @param[in] wei_step_y wei_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] wei_stride_z Stride of the weights tensor in Z dimension (in bytes)
+ * @param[in] wei_step_z wei_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] wei_offset_first_element_in_bytes The offset of the first element in the bias matrix
+ * @param[in] bia_ptr (Optional) Pointer to the bias tensor Supported data type: same as @p src_ptr (if F32/F16) or S32 (if QASYMM8)
+ * @param[in] bia_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
+ * @param[in] bia_step_x (Optional) bia_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] bia_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
+ * @param[in] wei_stride_w Stride of the weights tensor in W dimension (in bytes)
+ */
+__kernel void direct_convolution_nhwc(
+ TENSOR3D_DECLARATION(src),
+ TENSOR3D_DECLARATION(dst),
+ TENSOR3D_DECLARATION(wei),
+#if defined(HAS_BIAS)
+ VECTOR_DECLARATION(bia),
+#endif // defined(HAS_BIAS)
+ unsigned int wei_stride_w)
+{
+#if M0 != 1
+#error "M0: Only supported 1"
+#endif // M0 != 1
+
+ const int cout = max((int)(get_global_id(0) * N0 - (N0 - PARTIAL_STORE_N0) % N0), 0); // output channels
+ const int mout = get_global_id(1); // width x height
+ const int zout = get_global_id(2); // batch size index
+
+ REPEAT_VAR_INIT_TO_CONST(16, int, zero, 0);
+ REPEAT_VAR_INIT_TO_CONST(M0, int, xi, 0);
+ REPEAT_VAR_INIT_TO_CONST(M0, int, yi, 0);
+
+#define LINEAR_2_COORDS(i) \
+ xi##i = ((mout * M0 + i) % DST_WIDTH) * STRIDE_X; \
+ yi##i = ((mout * M0 + i) / DST_WIDTH) * STRIDE_Y; \
+ xi##i -= PAD_LEFT; \
+ yi##i -= PAD_TOP;
+
+ // Convert the linear index to coordinate
+ LINEAR_2_COORDS(0);
+
+#undef LINEAR_2_COORDS
+
+ uint src_offset = src_offset_first_element_in_bytes + zout * src_stride_y * (SRC_WIDTH * SRC_HEIGHT);
+ uint wei_offset = wei_offset_first_element_in_bytes + cout * wei_stride_w;
+
+ // Initialize the accumulators
+ REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0);
+
+ for(int i = 0; i < (WEI_WIDTH * WEI_HEIGHT); ++i)
+ {
+ int tmp = 0;
+ int xk = i % WEI_WIDTH;
+ int yk = i / WEI_WIDTH;
+
+ REPEAT_VAR_INIT_TO_CONST(M0, int, mi_valid_row, 0);
+ REPEAT_VAR_INIT_TO_CONST(M0, int, mi_mask, 1);
+
+ // Calculate the input row to read from source tensor
+#define MI_INIT(i) \
+ tmp = xi##i + xk + (yi##i + yk) * SRC_WIDTH; \
+ mi_valid_row##i = max(min(xi##i + xk, SRC_WIDTH - 1), 0) + max(min(yi##i + yk, SRC_HEIGHT - 1), 0) * SRC_WIDTH; \
+ if(tmp == mi_valid_row##i) \
+ mi_mask##i = 1; \
+ else \
+ mi_mask##i = 0;
+
+ MI_INIT(0);
+
+#undef MI_INIT
+
+ int k = 0;
+ for(; k <= (SRC_CHANNELS - K0); k += K0)
+ {
+ // Load values from src tensor
+ LOAD_BLOCK_INDIRECT(M0, K0, SRC_DATA_TYPE, a, src_ptr, src_offset + k * sizeof(SRC_DATA_TYPE), src_stride_y, mi_valid_row, mi_mask);
+
+ // Load values from weights tensor
+ LOAD_BLOCK(N0, K0, WEI_DATA_TYPE, b, wei_ptr, wei_offset, wei_stride_w, zero);
+
+#define TENSOR_DOT(i) \
+ ARM_DOT_K0XN0(K0, a##i, b, c##i); \
+ ARM_OFFSET_K0XN0(K0, a##i, b, SRC_OFFSET, WEI_OFFSET, c##i);
+
+ TENSOR_DOT(0);
+
+#undef TENSOR_DOT
+
+ wei_offset += K0 * sizeof(WEI_DATA_TYPE);
+ }
+
+#if(SRC_CHANNELS % K0) != 0
+ // Left-over accumulations
+ for(; k < SRC_CHANNELS; ++k)
+ {
+ // Load values from src tensor
+ LOAD_BLOCK_INDIRECT(M0, 1, SRC_DATA_TYPE, a, src_ptr, src_offset + k * sizeof(SRC_DATA_TYPE), src_stride_y, mi_valid_row, mi_mask);
+
+ // Load values from weights tensor
+ LOAD_BLOCK(N0, 1, WEI_DATA_TYPE, b, wei_ptr, wei_offset, wei_stride_w, zero);
+
+#define TENSOR_DOT(i) \
+ ARM_DOT_K0XN0(1, a##i, b, c##i); \
+ ARM_OFFSET_K0XN0(1, a##i, b, SRC_OFFSET, WEI_OFFSET, c##i);
+
+ TENSOR_DOT(0);
+
+#undef TENSOR_DOT
+
+ wei_offset += sizeof(WEI_DATA_TYPE);
+ }
+#endif // (SRC_CHANNELS % K0) != 0
+
+ c0 += (SRC_CHANNELS * SRC_OFFSET * WEI_OFFSET);
+ }
+
+ __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (cout * sizeof(DST_DATA_TYPE)) + (mout * M0 * dst_stride_y);
+
+ // Batched direct convolution
+ dst_addr += zout * dst_stride_y * (DST_WIDTH * DST_HEIGHT);
+
+#if defined(HAS_BIAS)
+ __global uchar *bias_addr = bia_ptr + bia_offset_first_element_in_bytes + (cout * sizeof(BIA_DATA_TYPE));
+
+ LOAD_BLOCK(1, N0, BIA_DATA_TYPE, bias, bias_addr, 0, zero0, zero);
+
+ // c = c + bias[broadcasted]
+ ADD_BLOCK_BROADCAST(M0, c, bias0);
+#endif // HAS_BIAS
+
+#if defined(IS_QUANTISED)
+
+ REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DST_DATA_TYPE, N0), cq, 0);
+
+#if DST_SHIFT < 0
+#define QUANTISE(i) \
+ c##i = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(c##i, DST_MULTIPLIER, DST_SHIFT, N0); \
+ c##i = c##i + DST_OFFSET; \
+ cq##i = CONVERT_SAT(c##i, VEC_DATA_TYPE(DST_DATA_TYPE, N0));
+#else // DST_SHIFT < 0
+#define QUANTISE(i) \
+ c##i = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(c##i, DST_MULTIPLIER, DST_SHIFT, N0); \
+ c##i = c##i + DST_OFFSET; \
+ cq##i = CONVERT_SAT(c##i, VEC_DATA_TYPE(DST_DATA_TYPE, N0));
+#endif // DST_SHIFT < 0
+
+ QUANTISE(0);
+
+#undef QUANTISE
+
+ STORE_VECTOR_SELECT(cq, DST_DATA_TYPE, dst_addr, N0, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0);
+#else // defined(IS_QUANTISED)
+ STORE_VECTOR_SELECT(c, DST_DATA_TYPE, dst_addr, N0, PARTIAL_STORE_N0, PARTIAL_STORE_N0 != 0 && get_global_id(0) == 0);
+#endif // defined(IS_QUANTISED)
+} \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/direct_convolution1x1.cl b/src/core/CL/cl_kernels/direct_convolution1x1.cl
index d0eea5bfb4..8ab2d1d4ea 100644
--- a/src/core/CL/cl_kernels/direct_convolution1x1.cl
+++ b/src/core/CL/cl_kernels/direct_convolution1x1.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,122 +31,6 @@
#if defined(DATA_TYPE) && defined(DATA_SIZE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH)
-#if defined(DATA_LAYOUT_NHWC)
-
-#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
-
-/** This kernel performs a direct convolution to convolve the low three dimensions of a tensor with data layout NHWC
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
- * @note The convolution stride x must be passed at compile time using -DSTRIDE_X e.g. -DSTRIDE_X=1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note In case biases will be added to the convolution -DHAS_BIAS has to be passed to append the final matrix with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution1x1_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
-#endif /* defined(HAS_BIAS) */
-
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
- values = 0;
- const int id0 = get_global_id(0);
- const int id1 = get_global_id(1);
- const int id2 = get_global_id(2);
- weights.ptr += id0 * weights_stride_w;
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + id2 * STRIDE_Y * (int)src_stride_z;
-
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- DATA_TYPE weight = *(__global DATA_TYPE *)weights.ptr;
-#if STRIDE_X == 1
- VEC_DATA_TYPE(DATA_TYPE, 8)
- col0 = (VEC_DATA_TYPE(DATA_TYPE, 8))(
- PTR_TO_VALUE(src_addr + 0 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 1 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 2 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 3 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 4 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 5 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 6 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 7 * src_stride_y, DATA_TYPE));
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
- VEC_DATA_TYPE(DATA_TYPE, 8)
- col0 = (VEC_DATA_TYPE(DATA_TYPE, 8))(
- PTR_TO_VALUE(src_addr + 0 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 2 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 4 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 6 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 8 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 10 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 12 * src_stride_y, DATA_TYPE),
- PTR_TO_VALUE(src_addr + 14 * src_stride_y, DATA_TYPE));
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X == 2 */
- values = ADD_OP(values, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))weight, col0));
-
- src_addr += src_stride_x;
- weights.ptr += weights_stride_x;
- }
-
-#ifdef HAS_BIAS
- values = ADD_OP(values, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0))));
-#endif /* defined(HAS_BIAS) */
-
- *((__global DATA_TYPE *)dst.ptr) = values.s0;
- *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values.s1;
- *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values.s2;
- *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values.s3;
- *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values.s4;
- *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values.s5;
- *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values.s6;
- *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values.s7;
-}
-#endif // defined(DATA_LAYOUT_NHWC)
-
#if STRIDE_X == 3
#define INPUT_PIXEL_STR(data_size) extract_input_stride3_##data_size
#define INPUT_PIXEL(data_size) INPUT_PIXEL_STR(data_size)
diff --git a/src/core/CL/cl_kernels/direct_convolution3x3.cl b/src/core/CL/cl_kernels/direct_convolution3x3.cl
index da7a1e7410..811df053c4 100644
--- a/src/core/CL/cl_kernels/direct_convolution3x3.cl
+++ b/src/core/CL/cl_kernels/direct_convolution3x3.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,185 +66,6 @@
acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1), (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2)); \
})
-#if defined(DATA_LAYOUT_NHWC)
-
-#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x3_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x3_STRIDE_NHWC_STRIDE1(acc, row_ptr, weights_ptr)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define CONVOLUTION1x3_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x3_STRIDE_NHWC_STRIDE2(acc, row_ptr, weights_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X == 2 */
-
-#define CONVOLUTION1x3_STRIDE_NHWC_STRIDE1(acc, row_ptr, weights_ptr) \
- { \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 2) \
- src1 = (VEC_DATA_TYPE(DATA_TYPE, 2))( \
- PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 3) \
- weights = (VEC_DATA_TYPE(DATA_TYPE, 3))( \
- PTR_TO_VALUE((weights_ptr) + 0 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE((weights_ptr) + 1 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE((weights_ptr) + 2 * weights_stride_y, DATA_TYPE)); \
- acc = ADD_OP(acc, MUL_OP(src0, (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s0)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s1)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s2)); \
- }
-
-#define CONVOLUTION1x3_STRIDE_NHWC_STRIDE2(acc, row_ptr, weights_ptr) \
- { \
- VEC_DATA_TYPE(DATA_TYPE, 16) \
- src0 = (VEC_DATA_TYPE(DATA_TYPE, 16))( \
- PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
- DATA_TYPE src1 = PTR_TO_VALUE(row_ptr + 16 * src_stride_y, DATA_TYPE); \
- VEC_DATA_TYPE(DATA_TYPE, 3) \
- weights = (VEC_DATA_TYPE(DATA_TYPE, 3))( \
- PTR_TO_VALUE((weights_ptr) + 0 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE((weights_ptr) + 1 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE((weights_ptr) + 2 * weights_stride_y, DATA_TYPE)); \
- \
- acc = ADD_OP(acc, MUL_OP(src0.s02468ACE, (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s0)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s1)); \
- acc = ADD_OP(acc, MUL_OP((VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1), (VEC_DATA_TYPE(DATA_TYPE, 8))weights.s2)); \
- }
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note This OpenCL kernel works with stride_x = 1 and 2
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/QS16/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution3x3_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)
- values0 = 0;
- const int id0 = get_global_id(0);
- const int id1 = get_global_id(1);
- const int id2 = get_global_id(2);
-
- __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + ((id2 * STRIDE_Y) - PAD_TOP) * (int)src_stride_z;
-
- weights_addr += id0 * weights_stride_w;
-
- const int coordy = ((id2 * STRIDE_Y) - PAD_TOP);
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
-#if PAD_TOP > 0
- if(coordy < 0) // special case Z = -1 doesn't exists
- {
- //skip first row and load the two next ones
- CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x3_NHWC(values0, src_addr + 2 * (int)src_stride_z, (weights_addr + 2 * (int)weights_stride_z));
- }
- else if(coordy == (SRC_HEIGHT - PAD_TOP - 1))
- {
- // special case when computing the last row of the output we must read the last three rows from the input buffer (including padding) but the
- // Z axis has no padding at all.
- CONVOLUTION1x3_NHWC(values0, src_addr, (weights_addr + 0 * (int)weights_stride_z));
- CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z));
- }
- else
- {
- CONVOLUTION1x3_NHWC(values0, src_addr, (weights_addr + 0 * (int)weights_stride_z));
- CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x3_NHWC(values0, src_addr + 2 * (int)src_stride_z, (weights_addr + 2 * (int)weights_stride_z));
- }
-#else // PAD_TOP > 0
- CONVOLUTION1x3_NHWC(values0, src_addr, (weights_addr + 0 * (int)weights_stride_z));
- CONVOLUTION1x3_NHWC(values0, src_addr + 1 * (int)src_stride_z, (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x3_NHWC(values0, src_addr + 2 * (int)src_stride_z, (weights_addr + 2 * (int)weights_stride_z));
-#endif // PAD_TOP > 0
- src_addr += src_stride_x;
- weights_addr += weights_stride_x;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
- values0 = ADD_OP(values0, (VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0))));
-#endif /* defined(HAS_BIAS) */
-
- *((__global DATA_TYPE *)(dst.ptr + 0 * dst_stride_y)) = values0.s0;
- *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values0.s1;
- *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values0.s2;
- *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values0.s3;
- *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values0.s4;
- *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values0.s5;
- *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values0.s6;
- *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values0.s7;
-}
-#endif // defined(DATA_LAYOUT_NHWC)
-
/** This kernel performs a direct convolution to convolve the low three dimensions.
*
* @note This OpenCL kernel works with stride_x = 1 and 2
diff --git a/src/core/CL/cl_kernels/direct_convolution5x5.cl b/src/core/CL/cl_kernels/direct_convolution5x5.cl
index e5c7a5107d..59d668f0bf 100644
--- a/src/core/CL/cl_kernels/direct_convolution5x5.cl
+++ b/src/core/CL/cl_kernels/direct_convolution5x5.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,242 +69,6 @@
acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468a, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
})
-#if defined(DATA_LAYOUT_NHWC)
-
-#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x5_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x5_STRIDE1_NHWC(acc, row_ptr, weights_ptr)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define CONVOLUTION1x5_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x5_STRIDE2_NHWC(acc, row_ptr, weights_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X == 2 */
-
-#define CONVOLUTION1x5_STRIDE1_NHWC(acc, row_ptr, weights_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- src1 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
- PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
- PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE)); \
- DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE); \
- acc += src0 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s345, src0.s67, src1.s012) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s45, src0.s67, src1.s0123) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
- })
-
-#define CONVOLUTION1x5_STRIDE2_NHWC(acc, row_ptr, weights_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 16) \
- src0 = (VEC_DATA_TYPE(DATA_TYPE, 16))( \
- PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- src1 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
- PTR_TO_VALUE(row_ptr + 16 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 17 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 18 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 19 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 4) \
- weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 4))( \
- PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE)); \
- DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE); \
- acc += src0.s02468ACE * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
- \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s3579, src0.sBDF, src1.s1) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468a, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
- })
-
-/** This kernel performs a direct convolution to convolve the low three dimensions in a tensor with the NHWC data layout
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution5x5_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- values0 = 0;
-
- const int id0 = get_global_id(0);
- const int id1 = get_global_id(1);
- const int id2 = get_global_id(2);
-
- __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + ((id2 * STRIDE_Y) - PAD_TOP) * (int)src_stride_z;
-
- weights_addr += id0 * weights_stride_w;
-
-#if(PAD_TOP == 1)
- const int coordy = id2 - PAD_TOP;
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- if(coordy < 0) // special case Z = -1 doesn't exists
- {
- //skip first row and load the two next ones
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- }
- else if(coordy == (DST_HEIGHT - PAD_TOP - 1))
- {
- // special case when computing the last row of the output we must read the last three rows from the input buffer (including padding) but the
- // Z axis has no padding at all.
- CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- }
- else
- {
- CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- }
- src_addr += src_stride_x;
- weights_addr += weights_stride_x;
- }
-#elif(PAD_TOP == 2)
- const int coordy = id2 * STRIDE_Y;
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- if(coordy == 0) // special case Z = -2 doesn't exists
- {
- //skip first row and load the two next ones
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- }
- else if(coordy == 1) // special case Z = -1 doesn't exists
- {
- //skip first row and load the two next ones
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- }
- else if(coordy == (SRC_HEIGHT - 1))
- {
- // special case when computing the last row of the output we must read the last three rows from the input buffer (including padding) but the
- // Z axis has no padding at all.
- CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- }
- else if(coordy == (SRC_HEIGHT - 2))
- {
- // special case when computing the last row of the output we must read the last three rows from the input buffer (including padding) but the
- // Z axis has no padding at all.
- CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- }
- else
- {
- CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- }
- src_addr += src_stride_x;
- weights_addr += weights_stride_x;
- }
-
-#else /* PAD_TOP == 2 */
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
- CONVOLUTION1x5_NHWC(values0, src_addr, weights_addr);
- CONVOLUTION1x5_NHWC(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x5_NHWC(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- src_addr += src_stride_x;
- weights_addr += weights_stride_x;
- }
-#endif /* PAD_TOP == 1 */
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
- values0 += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0)));
-#endif /* defined(HAS_BIAS) */
-
- *((__global DATA_TYPE *)(dst.ptr + 0 * dst_stride_y)) = values0.s0;
- *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values0.s1;
- *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values0.s2;
- *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values0.s3;
- *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values0.s4;
- *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values0.s5;
- *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values0.s6;
- *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values0.s7;
-}
-
-#endif // defined(DATA_LAYOUT_NHWC)
-
/** This kernel performs a direct convolution to convolve the low three dimensions.
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
diff --git a/src/core/CL/cl_kernels/direct_convolution9x9.cl b/src/core/CL/cl_kernels/direct_convolution9x9.cl
deleted file mode 100644
index 64da38d64d..0000000000
--- a/src/core/CL/cl_kernels/direct_convolution9x9.cl
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "helpers.h"
-
-#undef CONVERT_SAT
-
-#if defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(DATA_LAYOUT_NHWC) && defined(PAD_TOP)
-
-#define PTR_TO_VALUE(PTR, DATA_TYPE) *((__global DATA_TYPE *)(PTR))
-
-#define CONVOLUTION1x9_STRIDE1_NHWC(acc, row_ptr, weights_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src1 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 5 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 6 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 7 * weights_stride_y, DATA_TYPE)); \
- DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 8 * weights_stride_y, DATA_TYPE); \
- acc += src0 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1234, src0.s567, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s234, src0.s567, src1.s01) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s345, src0.s67, src1.s012) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s4567, src1.s0123) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s4; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s567, src1.s0123, src1.s4) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s5; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s67, src1.s012, src1.s345) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s6; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s7, src1.s0123, src1.s456) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s7; \
- acc += src1 * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
- })
-
-#define CONVOLUTION1x9_STRIDE2_NHWC(acc, row_ptr, weights_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 16) \
- src0 = (VEC_DATA_TYPE(DATA_TYPE, 16))( \
- PTR_TO_VALUE(row_ptr + 0 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 1 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 2 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 3 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 4 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 5 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 6 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 7 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 8 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 9 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 10 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 11 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 12 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 13 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 14 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 15 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- src1 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(row_ptr + 16 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 17 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 18 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 19 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 20 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 21 * src_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(row_ptr + 22 * src_stride_y, DATA_TYPE), PTR_TO_VALUE(row_ptr + 23 * src_stride_y, DATA_TYPE)); \
- VEC_DATA_TYPE(DATA_TYPE, 8) \
- weights_values0 = (VEC_DATA_TYPE(DATA_TYPE, 8))( \
- PTR_TO_VALUE(weights_ptr + 0 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 1 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 2 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 3 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 4 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 5 * weights_stride_y, DATA_TYPE), \
- PTR_TO_VALUE(weights_ptr + 6 * weights_stride_y, DATA_TYPE), PTR_TO_VALUE(weights_ptr + 7 * weights_stride_y, DATA_TYPE)); \
- DATA_TYPE weights_value1 = PTR_TO_VALUE(weights_ptr + 8 * weights_stride_y, DATA_TYPE); \
- acc += src0.s02468ACE * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s0; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s1357, src0.s9BDF) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s1; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s2468, src0.sACE, src1.s0) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s2; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s3579, src0.sBDF, src1.s1) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s3; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s468A, src0.sCE, src1.s02) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s4; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s579, src0.sBDF, src1.s13) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s5; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s68A, src0.sCE, src1.s024) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s6; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s79B, src0.sDF, src1.s135) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_values0.s7; \
- acc += (VEC_DATA_TYPE(DATA_TYPE, 8))(src0.s8AC, src0.sE, src1.s0246) * (VEC_DATA_TYPE(DATA_TYPE, 8))weights_value1; \
- })
-
-#if defined(VEC_SIZE)
-#define VFMA(acc, w, src0, src1, src2, src3, src4, src5, src6, src7) \
- ({ \
- acc##0 = fma(src0, w, acc##0); \
- acc##1 = fma(src1, w, acc##1); \
- acc##2 = fma(src2, w, acc##2); \
- acc##3 = fma(src3, w, acc##3); \
- acc##4 = fma(src4, w, acc##4); \
- acc##5 = fma(src5, w, acc##5); \
- acc##6 = fma(src6, w, acc##6); \
- acc##7 = fma(src7, w, acc##7); \
- })
-
-#define CONVOLUTION1x9_STRIDE1_NHWC_BIFROST(acc, row_ptr, weights_ptr) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)row_ptr); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 2 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 3 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 4 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 5 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 6 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 7 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 8 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 9 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 10 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 11 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src12 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 12 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src13 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 13 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src14 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 14 * src_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- src15 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(row_ptr + 15 * src_stride_y)); \
- \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 0 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 1 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 2 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 3 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 4 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 5 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 6 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 7 * weights_stride_y)); \
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) \
- w8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(weights_ptr + 8 * weights_stride_y)); \
- \
- VFMA(acc, w0, src0, src1, src2, src3, src4, src5, src6, src7); \
- VFMA(acc, w1, src1, src2, src3, src4, src5, src6, src7, src8); \
- VFMA(acc, w2, src2, src3, src4, src5, src6, src7, src8, src9); \
- VFMA(acc, w3, src3, src4, src5, src6, src7, src8, src9, src10); \
- VFMA(acc, w4, src4, src5, src6, src7, src8, src9, src10, src11); \
- VFMA(acc, w5, src5, src6, src7, src8, src9, src10, src11, src12); \
- VFMA(acc, w6, src6, src7, src8, src9, src10, src11, src12, src13); \
- VFMA(acc, w7, src7, src8, src9, src10, src11, src12, src13, src14); \
- VFMA(acc, w8, src8, src9, src10, src11, src12, src13, src14, src15); \
- })
-
-#if VEC_SIZE == 4
-#define REDUCE(out, vec) \
- ({ \
- VEC_DATA_TYPE(DATA_TYPE, 2) \
- tmp1 = vec.s01 + vec.s23; \
- out = tmp1.s0 + tmp1.s1; \
- })
-#else // VEC_SIZE == 4
-#error("Not supported")
-#endif // VEC_SIZE == 4
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x9_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x9_STRIDE1_NHWC_BIFROST(acc, row_ptr, weights_ptr)
-#else // STRIDE_X == 1
-#error "Not supported"
-#endif // STRIDE_X == 1
-
-#else // defined(VEC_SIZE)
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x9_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x9_STRIDE1_NHWC(acc, row_ptr, weights_ptr)
-#elif STRIDE_X == 2 // STRIDE_X == 1
-#define CONVOLUTION1x9_NHWC(acc, row_ptr, weights_ptr) CONVOLUTION1x9_STRIDE2_NHWC(acc, row_ptr, weights_ptr)
-#else // STRIDE_X == 1
-#error "STRIDE_X larger than 2 is not supported"
-#endif // STRIDE_X == 1
-
-#endif // defined(VEC_SIZE)
-
-//#if defined(VEC_SIZE)
-/** This kernel performs a direct convolution to convolve the low three dimensions in a tensor with the NHWC data layout
- *
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr (Optional) Pointer to the biases tensor. Same as @p src_ptr
- * @param[in] biases_stride_x (Optional) Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases tensor
- * @param[in] weights_stride_w (Optional) Stride of the weights tensor in the 4th dimension
- */
-__kernel void direct_convolution9x9_nhwc(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- values = 0;
-
-#if defined(VEC_SIZE)
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values0 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values1 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values2 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values3 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values4 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values5 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values6 = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- values7 = 0;
-#define STEP_X (VEC_SIZE)
-#else // defined(VEC_SIZE)
-#define STEP_X (1)
-#endif // defined(VEC_SIZE)
-
- const int id0 = get_global_id(0);
- const int id1 = get_global_id(1);
- const int id2 = get_global_id(2);
-
- __global uchar *weights_addr = (__global uchar *)tensor3D_offset(&weights, 0, 0, 0);
- __global uchar *src_addr = (__global uchar *)offset(&src, 0, 0) - src_stride_x * id0 + ((id2 * STRIDE_Y) - PAD_TOP) * (int)src_stride_z;
-
- weights_addr += id0 * weights_stride_w;
-
- const int coordy = (id2 * STRIDE_Y) - PAD_TOP;
- if(coordy < 0)
- {
- // Skip first rows containing padding
- for(volatile int d = 0; d < WEIGHTS_DEPTH; d += STEP_X)
- {
- const int start_z = -coordy;
- for(int i = start_z; i < 9; ++i)
- {
- CONVOLUTION1x9_NHWC(values, (src_addr + i * (int)src_stride_z), (weights_addr + i * (int)weights_stride_z));
- }
- src_addr += STEP_X * sizeof(DATA_TYPE);
- weights_addr += STEP_X * sizeof(DATA_TYPE);
- }
- }
- else if(coordy > (SRC_HEIGHT - 9))
- {
- for(volatile int d = 0; d < WEIGHTS_DEPTH; d += STEP_X)
- {
- // Avoid loading rows beyond the input height
- const int end_z = SRC_HEIGHT - coordy;
- for(int i = 0; i < end_z; ++i)
- {
- CONVOLUTION1x9_NHWC(values, (src_addr + i * (int)src_stride_z), (weights_addr + i * (int)weights_stride_z));
- }
- src_addr += STEP_X * sizeof(DATA_TYPE);
- weights_addr += STEP_X * sizeof(DATA_TYPE);
- }
- }
- else
- {
- for(volatile int d = 0; d < WEIGHTS_DEPTH; d += STEP_X)
- {
- CONVOLUTION1x9_NHWC(values, src_addr, weights_addr);
- CONVOLUTION1x9_NHWC(values, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 5 * (int)src_stride_z), (weights_addr + 5 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 6 * (int)src_stride_z), (weights_addr + 6 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 7 * (int)src_stride_z), (weights_addr + 7 * (int)weights_stride_z));
- CONVOLUTION1x9_NHWC(values, (src_addr + 8 * (int)src_stride_z), (weights_addr + 8 * (int)weights_stride_z));
- src_addr += STEP_X * sizeof(DATA_TYPE);
- weights_addr += STEP_X * sizeof(DATA_TYPE);
- }
- }
-
-#if defined(VEC_SIZE)
- REDUCE(values.s0, values0);
- REDUCE(values.s1, values1);
- REDUCE(values.s2, values2);
- REDUCE(values.s3, values3);
- REDUCE(values.s4, values4);
- REDUCE(values.s5, values5);
- REDUCE(values.s6, values6);
- REDUCE(values.s7, values7);
-#endif // defined(VEC_SIZE)
-
-#if defined(HAS_BIAS)
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
- values += (VEC_DATA_TYPE(DATA_TYPE, 8)) * ((__global DATA_TYPE *)(vector_offset(&biases, id0)));
-#endif // defined(HAS_BIAS)
-
- *((__global DATA_TYPE *)(dst.ptr + 0 * dst_stride_y)) = values.s0;
- *((__global DATA_TYPE *)(dst.ptr + 1 * dst_stride_y)) = values.s1;
- *((__global DATA_TYPE *)(dst.ptr + 2 * dst_stride_y)) = values.s2;
- *((__global DATA_TYPE *)(dst.ptr + 3 * dst_stride_y)) = values.s3;
- *((__global DATA_TYPE *)(dst.ptr + 4 * dst_stride_y)) = values.s4;
- *((__global DATA_TYPE *)(dst.ptr + 5 * dst_stride_y)) = values.s5;
- *((__global DATA_TYPE *)(dst.ptr + 6 * dst_stride_y)) = values.s6;
- *((__global DATA_TYPE *)(dst.ptr + 7 * dst_stride_y)) = values.s7;
-#undef STEP_X
-}
-#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(DATA_LAYOUT_NHWC) && defined(PAD_TOP)
diff --git a/src/core/CL/cl_kernels/direct_convolution_quantized.cl b/src/core/CL/cl_kernels/direct_convolution_quantized.cl
index 8237fe1700..b80d4f587e 100644
--- a/src/core/CL/cl_kernels/direct_convolution_quantized.cl
+++ b/src/core/CL/cl_kernels/direct_convolution_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,571 +31,6 @@
#define CONVERT_SAT_STR(x, type) (convert_##type##8_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
-#if defined(DATA_LAYOUT_NHWC)
-
-#if KERNEL_SIZE == 9
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x9(acc, src_ptr, weights_ptr) CONVOLUTION1x9_STRIDE1(acc, src_ptr, weights_ptr)
-#elif STRIDE_X == 2
-#define CONVOLUTION1x9(acc, src_ptr, weights_ptr) CONVOLUTION1x9_STRIDE2(acc, src_ptr, weights_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X */
-
-#define CONVOLUTION1x9_STRIDE1(acc, src_ptr, weights_ptr) \
- ({ \
- int8 weights_values0 = 0; \
- int weights_value1 = 0; \
- weights_values0.s0 = convert_int(*(weights_ptr + 0 * weights_stride_y)); \
- weights_values0.s1 = convert_int(*(weights_ptr + 1 * weights_stride_y)); \
- weights_values0.s2 = convert_int(*(weights_ptr + 2 * weights_stride_y)); \
- weights_values0.s3 = convert_int(*(weights_ptr + 3 * weights_stride_y)); \
- weights_values0.s4 = convert_int(*(weights_ptr + 4 * weights_stride_y)); \
- weights_values0.s5 = convert_int(*(weights_ptr + 5 * weights_stride_y)); \
- weights_values0.s6 = convert_int(*(weights_ptr + 6 * weights_stride_y)); \
- weights_values0.s7 = convert_int(*(weights_ptr + 7 * weights_stride_y)); \
- weights_value1 = convert_int(*(weights_ptr + 8 * weights_stride_y)); \
- \
- int8 src0 = 0; \
- int8 src1 = 0; \
- src0.s0 = convert_int(*(src_ptr + 0 * weights_stride_y)); \
- src0.s1 = convert_int(*(src_ptr + 1 * weights_stride_y)); \
- src0.s2 = convert_int(*(src_ptr + 2 * weights_stride_y)); \
- src0.s3 = convert_int(*(src_ptr + 3 * weights_stride_y)); \
- src0.s4 = convert_int(*(src_ptr + 4 * weights_stride_y)); \
- src0.s5 = convert_int(*(src_ptr + 5 * weights_stride_y)); \
- src0.s6 = convert_int(*(src_ptr + 6 * weights_stride_y)); \
- src0.s7 = convert_int(*(src_ptr + 7 * weights_stride_y)); \
- src1.s0 = convert_int(*(src_ptr + 8 * weights_stride_y)); \
- src1.s1 = convert_int(*(src_ptr + 9 * weights_stride_y)); \
- src1.s2 = convert_int(*(src_ptr + 10 * weights_stride_y)); \
- src1.s3 = convert_int(*(src_ptr + 11 * weights_stride_y)); \
- src1.s4 = convert_int(*(src_ptr + 12 * weights_stride_y)); \
- src1.s5 = convert_int(*(src_ptr + 13 * weights_stride_y)); \
- src1.s6 = convert_int(*(src_ptr + 14 * weights_stride_y)); \
- src1.s7 = convert_int(*(src_ptr + 15 * weights_stride_y)); \
- \
- acc += src0 * (int8)weights_values0.s0; \
- acc += (int8)(src0.s1234, src0.s567, src1.s0) * (int8)weights_values0.s1; \
- acc += (int8)(src0.s234, src0.s567, src1.s01) * (int8)weights_values0.s2; \
- acc += (int8)(src0.s345, src0.s67, src1.s012) * (int8)weights_values0.s3; \
- acc += (int8)(src0.s4567, src1.s0123) * (int8)weights_values0.s4; \
- acc += (int8)(src0.s567, src1.s0123, src1.s4) * (int8)weights_values0.s5; \
- acc += (int8)(src0.s67, src1.s012, src1.s345) * (int8)weights_values0.s6; \
- acc += (int8)(src0.s7, src1.s0123, src1.s456) * (int8)weights_values0.s7; \
- acc += src1 * (int8)weights_value1; \
- })
-
-#define CONVOLUTION1x9_STRIDE2(acc, src_ptr, weights_ptr) \
- ({ \
- int8 weights_values0 = 0; \
- int weights_value1 = 0; \
- weights_values0.s0 = convert_int(*(weights_ptr + 0 * weights_stride_y)); \
- weights_values0.s1 = convert_int(*(weights_ptr + 1 * weights_stride_y)); \
- weights_values0.s2 = convert_int(*(weights_ptr + 2 * weights_stride_y)); \
- weights_values0.s3 = convert_int(*(weights_ptr + 3 * weights_stride_y)); \
- weights_values0.s4 = convert_int(*(weights_ptr + 4 * weights_stride_y)); \
- weights_values0.s5 = convert_int(*(weights_ptr + 5 * weights_stride_y)); \
- weights_values0.s6 = convert_int(*(weights_ptr + 6 * weights_stride_y)); \
- weights_values0.s7 = convert_int(*(weights_ptr + 7 * weights_stride_y)); \
- weights_value1 = convert_int(*(weights_ptr + 8 * weights_stride_y)); \
- \
- int16 src0 = 0; \
- int8 src1 = 0; \
- src0.s0 = convert_int(*(src_ptr + 0 * weights_stride_y)); \
- src0.s1 = convert_int(*(src_ptr + 1 * weights_stride_y)); \
- src0.s2 = convert_int(*(src_ptr + 2 * weights_stride_y)); \
- src0.s3 = convert_int(*(src_ptr + 3 * weights_stride_y)); \
- src0.s4 = convert_int(*(src_ptr + 4 * weights_stride_y)); \
- src0.s5 = convert_int(*(src_ptr + 5 * weights_stride_y)); \
- src0.s6 = convert_int(*(src_ptr + 6 * weights_stride_y)); \
- src0.s7 = convert_int(*(src_ptr + 7 * weights_stride_y)); \
- src0.s8 = convert_int(*(src_ptr + 8 * weights_stride_y)); \
- src0.s9 = convert_int(*(src_ptr + 9 * weights_stride_y)); \
- src0.sA = convert_int(*(src_ptr + 10 * weights_stride_y)); \
- src0.sB = convert_int(*(src_ptr + 11 * weights_stride_y)); \
- src0.sC = convert_int(*(src_ptr + 12 * weights_stride_y)); \
- src0.sD = convert_int(*(src_ptr + 13 * weights_stride_y)); \
- src0.sE = convert_int(*(src_ptr + 14 * weights_stride_y)); \
- src0.sF = convert_int(*(src_ptr + 15 * weights_stride_y)); \
- src1.s0 = convert_int(*(src_ptr + 16 * weights_stride_y)); \
- src1.s1 = convert_int(*(src_ptr + 17 * weights_stride_y)); \
- src1.s2 = convert_int(*(src_ptr + 18 * weights_stride_y)); \
- src1.s3 = convert_int(*(src_ptr + 19 * weights_stride_y)); \
- src1.s4 = convert_int(*(src_ptr + 20 * weights_stride_y)); \
- src1.s5 = convert_int(*(src_ptr + 21 * weights_stride_y)); \
- src1.s6 = convert_int(*(src_ptr + 22 * weights_stride_y)); \
- src1.s7 = convert_int(*(src_ptr + 23 * weights_stride_y)); \
- \
- acc += src0.s02468ACE * (int8)weights_values0.s0; \
- acc += (int8)(src0.s1357, src0.s9BDF) * (int8)weights_values0.s1; \
- acc += (int8)(src0.s2468, src0.sACE, src1.s0) * (int8)weights_values0.s2; \
- acc += (int8)(src0.s3579, src0.sBDF, src1.s1) * (int8)weights_values0.s3; \
- acc += (int8)(src0.s468A, src0.sCE, src1.s02) * (int8)weights_values0.s4; \
- acc += (int8)(src0.s579, src0.sBDF, src1.s13) * (int8)weights_values0.s5; \
- acc += (int8)(src0.s68A, src0.sCE, src1.s024) * (int8)weights_values0.s6; \
- acc += (int8)(src0.s79B, src0.sDF, src1.s135) * (int8)weights_values0.s7; \
- acc += (int8)(src0.s8AC, src0.sE, src1.s0246) * (int8)weights_value1; \
- })
-
-#elif KERNEL_SIZE == 5
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x5(acc, src_ptr, weights_ptr) CONVOLUTION1x5_STRIDE1(acc, src_ptr, weights_ptr)
-#elif STRIDE_X == 2
-#define CONVOLUTION1x5(acc, src_ptr, weights_ptr) CONVOLUTION1x5_STRIDE2(acc, src_ptr, weights_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X */
-
-#define CONVOLUTION1x5_STRIDE1(acc, src_ptr, weights_ptr) \
- ({ \
- int4 weights_values0 = 0; \
- int weights_value1 = 0; \
- weights_values0.s0 = convert_int(*(weights_ptr + 0 * weights_stride_y)); \
- weights_values0.s1 = convert_int(*(weights_ptr + 1 * weights_stride_y)); \
- weights_values0.s2 = convert_int(*(weights_ptr + 2 * weights_stride_y)); \
- weights_values0.s3 = convert_int(*(weights_ptr + 3 * weights_stride_y)); \
- weights_value1 = convert_int(*(weights_ptr + 4 * weights_stride_y)); \
- \
- int8 src0 = 0; \
- int4 src1 = 0; \
- src0.s0 = convert_int(*(src_ptr + 0 * weights_stride_y)); \
- src0.s1 = convert_int(*(src_ptr + 1 * weights_stride_y)); \
- src0.s2 = convert_int(*(src_ptr + 2 * weights_stride_y)); \
- src0.s3 = convert_int(*(src_ptr + 3 * weights_stride_y)); \
- src0.s4 = convert_int(*(src_ptr + 4 * weights_stride_y)); \
- src0.s5 = convert_int(*(src_ptr + 5 * weights_stride_y)); \
- src0.s6 = convert_int(*(src_ptr + 6 * weights_stride_y)); \
- src0.s7 = convert_int(*(src_ptr + 7 * weights_stride_y)); \
- src1.s0 = convert_int(*(src_ptr + 8 * weights_stride_y)); \
- src1.s1 = convert_int(*(src_ptr + 9 * weights_stride_y)); \
- src1.s2 = convert_int(*(src_ptr + 10 * weights_stride_y)); \
- src1.s3 = convert_int(*(src_ptr + 11 * weights_stride_y)); \
- \
- acc += (src0 + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1234, src0.s567, src1.s0) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s234, src0.s567, src1.s01) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- acc += ((int8)(src0.s345, src0.s67, src1.s012) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
- acc += ((int8)(src0.s45, src0.s67, src1.s0123) + input_offset) * ((int8)weights_value1 + weight_offset); \
- })
-
-#define CONVOLUTION1x5_STRIDE2(acc, src_ptr, weights_ptr) \
- ({ \
- int4 weights_values0 = 0; \
- int weights_value1 = 0; \
- weights_values0.s0 = convert_int(*(weights_ptr + 0 * weights_stride_y)); \
- weights_values0.s1 = convert_int(*(weights_ptr + 1 * weights_stride_y)); \
- weights_values0.s2 = convert_int(*(weights_ptr + 2 * weights_stride_y)); \
- weights_values0.s3 = convert_int(*(weights_ptr + 3 * weights_stride_y)); \
- weights_value1 = convert_int(*(weights_ptr + 4 * weights_stride_y)); \
- \
- int16 src0 = 0; \
- int4 src1 = 0; \
- src0.s0 = convert_int(*(src_ptr + 0 * weights_stride_y)); \
- src0.s1 = convert_int(*(src_ptr + 1 * weights_stride_y)); \
- src0.s2 = convert_int(*(src_ptr + 2 * weights_stride_y)); \
- src0.s3 = convert_int(*(src_ptr + 3 * weights_stride_y)); \
- src0.s4 = convert_int(*(src_ptr + 4 * weights_stride_y)); \
- src0.s5 = convert_int(*(src_ptr + 5 * weights_stride_y)); \
- src0.s6 = convert_int(*(src_ptr + 6 * weights_stride_y)); \
- src0.s7 = convert_int(*(src_ptr + 7 * weights_stride_y)); \
- src0.s8 = convert_int(*(src_ptr + 8 * weights_stride_y)); \
- src0.s9 = convert_int(*(src_ptr + 9 * weights_stride_y)); \
- src0.sa = convert_int(*(src_ptr + 10 * weights_stride_y)); \
- src0.sb = convert_int(*(src_ptr + 11 * weights_stride_y)); \
- src0.sc = convert_int(*(src_ptr + 12 * weights_stride_y)); \
- src0.sd = convert_int(*(src_ptr + 13 * weights_stride_y)); \
- src0.se = convert_int(*(src_ptr + 14 * weights_stride_y)); \
- src0.sf = convert_int(*(src_ptr + 15 * weights_stride_y)); \
- src1.s0 = convert_int(*(src_ptr + 16 * weights_stride_y)); \
- src1.s1 = convert_int(*(src_ptr + 17 * weights_stride_y)); \
- src1.s2 = convert_int(*(src_ptr + 18 * weights_stride_y)); \
- src1.s3 = convert_int(*(src_ptr + 19 * weights_stride_y)); \
- \
- acc += (src0.even + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
- acc += ((int8)(src0.s468a, src0.sCE, src1.s02) + input_offset) * ((int8)weights_value1 + weight_offset); \
- })
-
-#elif KERNEL_SIZE == 3
-
-#if STRIDE_X == 1
-#define CONVOLUTION1x3(acc, src_ptr, weights_ptr) CONVOLUTION1x3_STRIDE1(acc, src_ptr, weights_ptr)
-#elif STRIDE_X == 2
-#define CONVOLUTION1x3(acc, src_ptr, weights_ptr) CONVOLUTION1x3_STRIDE2(acc, src_ptr, weights_ptr)
-#else /* STRIDE_X not equals 1 or 2 */
-#error "STRIDE_X larger than 2 is not supported"
-#endif /* STRIDE_X */
-
-#define CONVOLUTION1x3_STRIDE1(acc, src_ptr, weights_ptr) \
- ({ \
- int3 weights_values0 = 0; \
- weights_values0.s0 = convert_int(*(weights_ptr + 0 * weights_stride_y)); \
- weights_values0.s1 = convert_int(*(weights_ptr + 1 * weights_stride_y)); \
- weights_values0.s2 = convert_int(*(weights_ptr + 2 * weights_stride_y)); \
- \
- int8 src0 = 0; \
- int2 src1 = 0; \
- src0.s0 = convert_int(*(src_ptr + 0 * weights_stride_y)); \
- src0.s1 = convert_int(*(src_ptr + 1 * weights_stride_y)); \
- src0.s2 = convert_int(*(src_ptr + 2 * weights_stride_y)); \
- src0.s3 = convert_int(*(src_ptr + 3 * weights_stride_y)); \
- src0.s4 = convert_int(*(src_ptr + 4 * weights_stride_y)); \
- src0.s5 = convert_int(*(src_ptr + 5 * weights_stride_y)); \
- src0.s6 = convert_int(*(src_ptr + 6 * weights_stride_y)); \
- src0.s7 = convert_int(*(src_ptr + 7 * weights_stride_y)); \
- src1.s0 = convert_int(*(src_ptr + 8 * weights_stride_y)); \
- src1.s1 = convert_int(*(src_ptr + 9 * weights_stride_y)); \
- \
- acc += (src0 + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1234, src0.s567, src1.s0) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s234, src0.s567, src1.s01) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- })
-
-#define CONVOLUTION1x3_STRIDE2(acc, src_ptr, weights_ptr) \
- ({ \
- int3 weights_values0 = 0; \
- weights_values0.s0 = convert_int(*(weights_ptr + 0 * weights_stride_y)); \
- weights_values0.s1 = convert_int(*(weights_ptr + 1 * weights_stride_y)); \
- weights_values0.s2 = convert_int(*(weights_ptr + 2 * weights_stride_y)); \
- \
- int16 src0 = 0; \
- int src1 = 0; \
- src0.s0 = convert_int(*(src_ptr + 0 * src_stride_y)); \
- src0.s1 = convert_int(*(src_ptr + 1 * src_stride_y)); \
- src0.s2 = convert_int(*(src_ptr + 2 * src_stride_y)); \
- src0.s3 = convert_int(*(src_ptr + 3 * src_stride_y)); \
- src0.s4 = convert_int(*(src_ptr + 4 * src_stride_y)); \
- src0.s5 = convert_int(*(src_ptr + 5 * src_stride_y)); \
- src0.s6 = convert_int(*(src_ptr + 6 * src_stride_y)); \
- src0.s7 = convert_int(*(src_ptr + 7 * src_stride_y)); \
- src0.s8 = convert_int(*(src_ptr + 8 * src_stride_y)); \
- src0.s9 = convert_int(*(src_ptr + 9 * src_stride_y)); \
- src0.sa = convert_int(*(src_ptr + 10 * src_stride_y)); \
- src0.sb = convert_int(*(src_ptr + 11 * src_stride_y)); \
- src0.sc = convert_int(*(src_ptr + 12 * src_stride_y)); \
- src0.sd = convert_int(*(src_ptr + 13 * src_stride_y)); \
- src0.se = convert_int(*(src_ptr + 14 * src_stride_y)); \
- src0.sf = convert_int(*(src_ptr + 15 * src_stride_y)); \
- src1 = convert_int(*(src_ptr + 16 * src_stride_y)); \
- acc += (src0.even + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s2468, src0.sACE, src1) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- })
-
-#elif KERNEL_SIZE == 1
-
-#if STRIDE_X == 3
-#define INPUT_VALUE extract_input_stride3
-#elif STRIDE_X == 2
-#define INPUT_VALUE extract_input_stride2
-#elif STRIDE_X == 1
-#define INPUT_VALUE extract_input_stride1
-
-#else /* STRIDE_X not equals 1, 2 or 3 */
-#error "Only support strides 1, 2 and 3"
-#endif /* STRIDE_X */
-
-#endif // KERNEL_SIZE == 1
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 1.
- *
- * @param[in] input_value Pointer to the first value.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride1(__global const DATA_TYPE *input_value, const uchar stride_y)
-{
- VEC_DATA_TYPE(DATA_TYPE, 8)
- vals;
- vals.s0 = *(input_value + 0 * stride_y);
- vals.s1 = *(input_value + 1 * stride_y);
- vals.s2 = *(input_value + 2 * stride_y);
- vals.s3 = *(input_value + 3 * stride_y);
- vals.s4 = *(input_value + 4 * stride_y);
- vals.s5 = *(input_value + 5 * stride_y);
- vals.s6 = *(input_value + 6 * stride_y);
- vals.s7 = *(input_value + 7 * stride_y);
-
- return vals;
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 2.
- *
- * @param[in] input_value Pointer to the first value.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride2(__global const DATA_TYPE *input_value, const uchar stride_y)
-{
- VEC_DATA_TYPE(DATA_TYPE, 8)
- vals;
- vals.s0 = *(input_value + 0 * stride_y);
- vals.s1 = *(input_value + 2 * stride_y);
- vals.s2 = *(input_value + 4 * stride_y);
- vals.s3 = *(input_value + 6 * stride_y);
- vals.s4 = *(input_value + 8 * stride_y);
- vals.s5 = *(input_value + 10 * stride_y);
- vals.s6 = *(input_value + 12 * stride_y);
- vals.s7 = *(input_value + 14 * stride_y);
-
- return vals;
-}
-
-/** Extracts a 1D horizontal vector from the input tensor with stride as 3 and 8-bit data size.
- *
- * @param[in] input_value Pointer to the first value.
- *
- * @return extracted input values.
- */
-inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3(__global const DATA_TYPE *input_value, const uchar stride_y)
-{
- VEC_DATA_TYPE(DATA_TYPE, 8)
- vals;
- vals.s0 = *(input_value + 0 * stride_y);
- vals.s1 = *(input_value + 3 * stride_y);
- vals.s2 = *(input_value + 6 * stride_y);
- vals.s3 = *(input_value + 9 * stride_y);
- vals.s4 = *(input_value + 12 * stride_y);
- vals.s5 = *(input_value + 15 * stride_y);
- vals.s6 = *(input_value + 18 * stride_y);
- vals.s7 = *(input_value + 21 * stride_y);
-
- return vals;
-}
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note The convolution stride x must be passed at compile time using -DSTRIDE_X e.g. -DSTRIDE_X=1
- * @note The third dimensions of the weights tensors must be passed at compile time using -DWEIGHTS_DEPTH
- * @note If biases are used then -DHAS_BIAS has to be passed at compile time
- * @note The output quantization multiplier must be passed at compile time using -DOUTPUT_MULTIPLIER e.g. -DOUTPUT_MULTIPLIER=1234
- * @note The output quantization shift must be passed at compile time using -DOUTPUT_SHIFT e.g. -DOUTPUT_SHIFT=4
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
- * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] dst_step_y dst_stride_y * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes)
- * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes)
- * @param[in] weights_step_y weights_stride_y * number of elements along y processed per workitem(in bytes)
- * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes)
- * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: S32
- * @param[in] biases_stride_x Stride of the biases tensor in X dimension (in bytes)
- * @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- * @param[in] input_offset Input offset quantization parameter
- * @param[in] weight_offset Weights offset quantization parameter
- * @param[in] output_offset Output offset quantization parameter
- */
-__kernel void direct_convolution_quantized(
- TENSOR3D_DECLARATION(src),
- TENSOR3D_DECLARATION(dst),
- TENSOR3D_DECLARATION(weights),
-#ifdef HAS_BIAS
- VECTOR_DECLARATION(biases),
-#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w,
- int input_offset,
- int weight_offset,
- int output_offset)
-{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
- Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
-
- int8 values0 = 0;
-
- const int id0 = get_global_id(0);
- const int y_coord = (get_global_id(2) * STRIDE_Y) - PAD_TOP;
-
- __global DATA_TYPE *weights_addr = (__global DATA_TYPE *)tensor3D_offset(&weights, 0, 0, 0);
- __global DATA_TYPE *src_addr = (__global DATA_TYPE *)offset(&src, 0, 0) - src_stride_x * id0 + y_coord * (int)src_stride_z;
-
- weights_addr += id0 * weights_stride_w;
-
- for(volatile int d = 0; d < WEIGHTS_DEPTH; ++d)
- {
-#if KERNEL_SIZE == 9
- if(y_coord < 0)
- {
- const int start_z = -y_coord;
- for(int i = start_z; i < 9; ++i)
- {
- CONVOLUTION1x9(values0, (src_addr + i * (int)src_stride_z), (weights_addr + i * (int)weights_stride_z));
- }
- }
- else if(y_coord > (SRC_HEIGHT - 9))
- {
- // Avoid loading rows beyond the input height
- const int end_z = SRC_HEIGHT - y_coord;
- for(int i = 0; i < end_z; ++i)
- {
- CONVOLUTION1x9(values0, (src_addr + i * (int)src_stride_z), (weights_addr + i * (int)weights_stride_z));
- }
- }
- else
- {
- CONVOLUTION1x9(values0, src_addr, weights_addr);
- CONVOLUTION1x9(values0, (src_addr + 1 * (int)src_stride_z), (weights_addr + 1 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 2 * (int)src_stride_z), (weights_addr + 2 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 3 * (int)src_stride_z), (weights_addr + 3 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 4 * (int)src_stride_z), (weights_addr + 4 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 5 * (int)src_stride_z), (weights_addr + 5 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 6 * (int)src_stride_z), (weights_addr + 6 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 7 * (int)src_stride_z), (weights_addr + 7 * (int)weights_stride_z));
- CONVOLUTION1x9(values0, (src_addr + 8 * (int)src_stride_z), (weights_addr + 8 * (int)weights_stride_z));
- }
-#elif KERNEL_SIZE == 5
-#if(PAD_TOP == 1) || (PAD_BOTTM == 1)
- if(y_coord < 0) // special case Z = -1 doesn't exists
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_z));
- }
- else if(get_global_id(2) == (DST_HEIGHT - 1))
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- }
- else
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_z));
- }
-#elif(PAD_TOP == 2) || (PAD_BOTTM == 2)
- if(y_coord < -1)
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_z));
- }
- else if(y_coord == -1)
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_z));
- }
- else if(y_coord == (SRC_HEIGHT - 3))
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- }
- else if(y_coord >= (SRC_HEIGHT - 4))
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- }
- else
- {
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_z));
- }
-#else /* PAD_TOP == 2 || || PAD_BOTTM == 2 */
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 3 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 3 * weights_stride_z));
- CONVOLUTION1x5(values0, (__global DATA_TYPE *)(src_addr + 4 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 4 * weights_stride_z));
-#endif /* PAD_TOP == 1 || || PAD_BOTTM == 1 */
-#elif KERNEL_SIZE == 3
-#if(PAD_TOP > 0) || (PAD_BOTTOM > 0)
- if(y_coord < 0) // special case Z = -1 doesn't exists
- {
- //skip first row and load the two next ones
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- }
- else if(y_coord == (SRC_HEIGHT - PAD_BOTTOM - 1))
- {
- // special case when computing the last row of the output we must read the last three rows from the input buffer (including padding) but the
- // Z axis has no padding at all.
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- }
- else
- {
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
- }
-#else // PAD_TOP > 0 || PAD_BOTTOM > 0
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 0 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 0 * weights_stride_z));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 1 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 1 * weights_stride_z));
- CONVOLUTION1x3(values0, (__global DATA_TYPE *)(src_addr + 2 * src_stride_z), (__global DATA_TYPE *)(weights_addr + 2 * weights_stride_z));
-#endif // PAD_TOP > 0 || PAD_BOTTOM > 0
-#elif KERNEL_SIZE == 1
- int weight = convert_int(*(__global DATA_TYPE *)weights_addr);
- int8 input_value = convert_int8(INPUT_VALUE((__global DATA_TYPE *)src_addr, src_stride_y));
- values0 += (input_value + input_offset) * ((int8)weight + weight_offset);
-#endif /* (KERNEL_SIZE == 1) || (KERNEL_SIZE == 3) || (KERNEL_SIZE == 5) */
-
- src_addr += src_stride_x;
- weights_addr += weights_stride_x;
- }
-
-#ifdef HAS_BIAS
- Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases);
- __global int *bias_addr = ((__global int *)(vector_offset(&biases, id0)));
- values0 += (int8)(*bias_addr);
-#endif /* defined(HAS_BIAS) */
-
-#if OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#else // OUTPUT_SHIFT < 0
- values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
-#endif // OUTPUT_SHIFT < 0
- values0 = values0 + output_offset;
-
- VEC_DATA_TYPE(DATA_TYPE, 8)
- values = CONVERT_SAT(values0, DATA_TYPE);
- *(dst.ptr + 0 * dst_stride_y) = values.s0;
- *(dst.ptr + 1 * dst_stride_y) = values.s1;
- *(dst.ptr + 2 * dst_stride_y) = values.s2;
- *(dst.ptr + 3 * dst_stride_y) = values.s3;
- *(dst.ptr + 4 * dst_stride_y) = values.s4;
- *(dst.ptr + 5 * dst_stride_y) = values.s5;
- *(dst.ptr + 6 * dst_stride_y) = values.s6;
- *(dst.ptr + 7 * dst_stride_y) = values.s7;
-}
-
-#else // defined(DATA_LAYOUT_NHWC)
-
#if KERNEL_SIZE == 9
#if STRIDE_X == 1
@@ -606,37 +41,37 @@ __kernel void direct_convolution_quantized(
#error "STRIDE_X larger than 2 is not supported"
#endif /* STRIDE_X */
-#define CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- acc += (src0.lo + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1234, src0.s5678) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s2345, src0.s6789) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- acc += ((int8)(src0.s3456, src0.s789A) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
- acc += ((int8)(src0.s4567, src0.s89AB) + input_offset) * ((int8)weights_values0.s4 + weight_offset); \
- acc += ((int8)(src0.s5678, src0.s9ABC) + input_offset) * ((int8)weights_values0.s5 + weight_offset); \
- acc += ((int8)(src0.s6789, src0.sABCD) + input_offset) * ((int8)weights_values0.s6 + weight_offset); \
- acc += ((int8)(src0.s789A, src0.sBCDE) + input_offset) * ((int8)weights_values0.s7 + weight_offset); \
- acc += ((int8)(src0.s89AB, src0.sCDEF) + input_offset) * ((int8)weights_value1 + weight_offset); \
+#define CONVOLUTION1x9_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
+ int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
+ int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
+ acc += (src0.lo + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s1234, src0.s5678) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s2345, src0.s6789) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s3456, src0.s789A) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s4567, src0.s89AB) + INPUT_OFFSET) * ((int8)weights_values0.s4 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s5678, src0.s9ABC) + INPUT_OFFSET) * ((int8)weights_values0.s5 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s6789, src0.sABCD) + INPUT_OFFSET) * ((int8)weights_values0.s6 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s789A, src0.sBCDE) + INPUT_OFFSET) * ((int8)weights_values0.s7 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s89AB, src0.sCDEF) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
})
-#define CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- int8 src1 = convert_int8(vload8(0, src_row_ptr + 16)); \
- acc += (src0.even + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
- acc += ((int8)(src0.s468A, src0.sCE, src1.s02) + input_offset) * ((int8)weights_values0.s4 + weight_offset); \
- acc += ((int8)(src0.s579B, src0.sDF, src1.s13) + input_offset) * ((int8)weights_values0.s5 + weight_offset); \
- acc += ((int8)(src0.s68AC, src0.sE, src1.s024) + input_offset) * ((int8)weights_values0.s6 + weight_offset); \
- acc += ((int8)(src0.s79BD, src0.sF, src1.s135) + input_offset) * ((int8)weights_values0.s7 + weight_offset); \
- acc += ((int8)(src0.s8ACE, src1.s0246) + input_offset) * ((int8)weights_value1 + weight_offset); \
+#define CONVOLUTION1x9_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int8 weights_values0 = convert_int8(vload8(0, weights_row_ptr)); \
+ int weights_value1 = convert_int(*(weights_row_ptr + 8)); \
+ int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
+ int8 src1 = convert_int8(vload8(0, src_row_ptr + 16)); \
+ acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s468A, src0.sCE, src1.s02) + INPUT_OFFSET) * ((int8)weights_values0.s4 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s579B, src0.sDF, src1.s13) + INPUT_OFFSET) * ((int8)weights_values0.s5 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s68AC, src0.sE, src1.s024) + INPUT_OFFSET) * ((int8)weights_values0.s6 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s79BD, src0.sF, src1.s135) + INPUT_OFFSET) * ((int8)weights_values0.s7 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s8ACE, src1.s0246) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
})
#elif KERNEL_SIZE == 5
@@ -649,30 +84,30 @@ __kernel void direct_convolution_quantized(
#error "STRIDE_X larger than 2 is not supported"
#endif /* STRIDE_X */
-#define CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 4)); \
- int8 src0 = convert_int8(vload8(0, src_row_ptr)); \
- int4 src1 = convert_int4(vload4(0, src_row_ptr + 8)); \
- acc += (src0 + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1234, src0.s567, src1.s0) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s234, src0.s567, src1.s01) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- acc += ((int8)(src0.s345, src0.s67, src1.s012) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
- acc += ((int8)(src0.s45, src0.s67, src1.s0123) + input_offset) * ((int8)weights_value1 + weight_offset); \
+#define CONVOLUTION1x5_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr)); \
+ int weights_value1 = convert_int(*(weights_row_ptr + 4)); \
+ int8 src0 = convert_int8(vload8(0, src_row_ptr)); \
+ int4 src1 = convert_int4(vload4(0, src_row_ptr + 8)); \
+ acc += (src0 + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s1234, src0.s567, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s234, src0.s567, src1.s01) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s345, src0.s67, src1.s012) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s45, src0.s67, src1.s0123) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
})
-#define CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr)); \
- int weights_value1 = convert_int(*(weights_row_ptr + 4)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- int4 src1 = convert_int4(vload4(0, src_row_ptr + 16)); \
- acc += (src0.even + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
- acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + input_offset) * ((int8)weights_values0.s3 + weight_offset); \
- acc += ((int8)(src0.s468a, src0.sCE, src1.s02) + input_offset) * ((int8)weights_value1 + weight_offset); \
+#define CONVOLUTION1x5_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int4 weights_values0 = convert_int4(vload4(0, weights_row_ptr)); \
+ int weights_value1 = convert_int(*(weights_row_ptr + 4)); \
+ int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
+ int4 src1 = convert_int4(vload4(0, src_row_ptr + 16)); \
+ acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s2468, src0.sACE, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s3579, src0.sBDF, src1.s1) + INPUT_OFFSET) * ((int8)weights_values0.s3 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s468a, src0.sCE, src1.s02) + INPUT_OFFSET) * ((int8)weights_value1 + WEIGHTS_OFFSET); \
})
#elif KERNEL_SIZE == 3
@@ -685,24 +120,24 @@ __kernel void direct_convolution_quantized(
#error "STRIDE_X larger than 2 is not supported"
#endif /* STRIDE_X */
-#define CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr)); \
- int8 src0 = convert_int8(vload8(0, src_row_ptr)); \
- int2 src1 = convert_int2(vload2(0, src_row_ptr + 8)); \
- acc += (src0 + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1234, src0.s567, src1.s0) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s234, src0.s567, src1.s01) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
+#define CONVOLUTION1x3_STRIDE1(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr)); \
+ int8 src0 = convert_int8(vload8(0, src_row_ptr)); \
+ int2 src1 = convert_int2(vload2(0, src_row_ptr + 8)); \
+ acc += (src0 + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s1234, src0.s567, src1.s0) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s234, src0.s567, src1.s01) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
})
-#define CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
- ({ \
- int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr)); \
- int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
- int src1 = convert_int(*(src_row_ptr + 16)); \
- acc += (src0.even + input_offset) * ((int8)weights_values0.s0 + weight_offset); \
- acc += ((int8)(src0.s1357, src0.s9BDF) + input_offset) * ((int8)weights_values0.s1 + weight_offset); \
- acc += ((int8)(src0.s2468, src0.sACE, src1) + input_offset) * ((int8)weights_values0.s2 + weight_offset); \
+#define CONVOLUTION1x3_STRIDE2(acc, src_row_ptr, weights_row_ptr) \
+ ({ \
+ int3 weights_values0 = convert_int3(vload3(0, weights_row_ptr)); \
+ int16 src0 = convert_int16(vload16(0, src_row_ptr)); \
+ int src1 = convert_int(*(src_row_ptr + 16)); \
+ acc += (src0.even + INPUT_OFFSET) * ((int8)weights_values0.s0 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s1357, src0.s9BDF) + INPUT_OFFSET) * ((int8)weights_values0.s1 + WEIGHTS_OFFSET); \
+ acc += ((int8)(src0.s2468, src0.sACE, src1) + INPUT_OFFSET) * ((int8)weights_values0.s2 + WEIGHTS_OFFSET); \
})
#elif KERNEL_SIZE == 1
@@ -768,6 +203,9 @@ inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3(__global const DATA_TYP
* @note If biases are used then -DHAS_BIAS has to be passed at compile time
* @note The output quantization multiplier must be passed at compile time using -DOUTPUT_MULTIPLIER e.g. -DOUTPUT_MULTIPLIER=1234
* @note The output quantization shift must be passed at compile time using -DOUTPUT_SHIFT e.g. -DOUTPUT_SHIFT=4
+ * @note The input offset quantization parameter must be passed at compile time using -DINPUT_OFFSET e.g. -DINPUT_OFFSET=3
+ * @note The weights offset quantization parameter must be passed at compile time using -DWEIGHTS_OFFSET e.g. -DWEIGHTS_OFFSET=3
+ * @note The destination offset quantization parameter must be passed at compile time using -DOUTPUT_OFFSET e.g. -DOUTPUT_OFFSET=3
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -798,9 +236,6 @@ inline VEC_DATA_TYPE(DATA_TYPE, 8) extract_input_stride3(__global const DATA_TYP
* @param[in] biases_step_x biases_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] biases_offset_first_element_in_bytes The offset of the first element in the biases tensor
* @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- * @param[in] input_offset Input offset quantization parameter
- * @param[in] weight_offset Weights offset quantization parameter
- * @param[in] output_offset Output offset quantization parameter
*/
__kernel void direct_convolution_quantized(
TENSOR3D_DECLARATION(src),
@@ -809,10 +244,7 @@ __kernel void direct_convolution_quantized(
#ifdef HAS_BIAS
VECTOR_DECLARATION(biases),
#endif /* defined(HAS_BIAS) */
- unsigned int weights_stride_w,
- int input_offset,
- int weight_offset,
- int output_offset)
+ unsigned int weights_stride_w)
{
Image src = CONVERT_TO_IMAGE_STRUCT(src);
Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights);
@@ -851,7 +283,7 @@ __kernel void direct_convolution_quantized(
#elif KERNEL_SIZE == 1
int weight = convert_int(*(__global DATA_TYPE *)weights_addr);
int8 input_value = convert_int8(INPUT_VALUE((__global DATA_TYPE *)src_addr));
- values0 += (input_value + input_offset) * ((int8)weight + weight_offset);
+ values0 += (input_value + INPUT_OFFSET) * ((int8)weight + WEIGHTS_OFFSET);
#endif /* (KERNEL_SIZE == 1) || (KERNEL_SIZE == 3) || (KERNEL_SIZE == 5) */
src_addr += src_stride_z;
@@ -869,10 +301,8 @@ __kernel void direct_convolution_quantized(
#else // OUTPUT_SHIFT < 0
values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8);
#endif // OUTPUT_SHIFT < 0
- values0 = values0 + output_offset;
+ values0 = values0 + OUTPUT_OFFSET;
vstore8(CONVERT_SAT(values0, DATA_TYPE), 0, (__global DATA_TYPE *)dst.ptr);
}
-
-#endif // defined(DATA_LAYOUT_NHWC)
#endif // defined(DATA_TYPE) && defined(STRIDE_X) && defined(WEIGHTS_DEPTH) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)
diff --git a/src/core/CL/cl_kernels/gemm_helpers.h b/src/core/CL/cl_kernels/gemm_helpers.h
index 54d38655a4..be72efa3b4 100644
--- a/src/core/CL/cl_kernels/gemm_helpers.h
+++ b/src/core/CL/cl_kernels/gemm_helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -497,6 +497,185 @@
#define LOAD_TEXTURE2D(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
/** @} */ // end of group LOAD_TEXTURE2D
+/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1) passing the Y index for each row to be loaded.
+ * @name LOAD_ROW_INDIRECT_n
+ *
+ * @param[in] N0 The number of columns to load
+ * @param[in] DATA_TYPE The data type of variables
+ * @param[in] BASENAME The basename of the destination variables for the loaded rows
+ * @param[in] PTR The base pointer
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride value in y-axis direction
+ * @param[in] Y The y-axis offset vector
+ * @param[in] Y_MASK The y-axis mask vector. If 0, forces BASENAMEn to 0
+ * @{
+ */
+#define LOAD_ROW_INDIRECT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##0; \
+ if(Y_MASK##0 != 0) \
+ BASENAME##0 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##0 * STRIDE_Y)); \
+ else \
+ BASENAME##0 = 0;
+
+#define LOAD_ROW_INDIRECT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##1; \
+ if(Y_MASK##1 != 0) \
+ BASENAME##1 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##1 * STRIDE_Y)); \
+ else \
+ BASENAME##1 = 0;
+
+#define LOAD_ROW_INDIRECT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##2; \
+ if(Y_MASK##2 != 0) \
+ BASENAME##2 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##2 * STRIDE_Y)); \
+ else \
+ BASENAME##2 = 0;
+
+#define LOAD_ROW_INDIRECT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##3; \
+ if(Y_MASK##3 != 0) \
+ BASENAME##3 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##3 * STRIDE_Y)); \
+ else \
+ BASENAME##3 = 0;
+
+#define LOAD_ROW_INDIRECT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##4; \
+ if(Y_MASK##4 != 0) \
+ BASENAME##4 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##4 * STRIDE_Y)); \
+ else \
+ BASENAME##4 = 0;
+
+#define LOAD_ROW_INDIRECT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##5; \
+ if(Y_MASK##5 != 0) \
+ BASENAME##5 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##5 * STRIDE_Y)); \
+ else \
+ BASENAME##5 = 0;
+
+#define LOAD_ROW_INDIRECT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##6; \
+ if(Y_MASK##6 != 0) \
+ BASENAME##6 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##6 * STRIDE_Y)); \
+ else \
+ BASENAME##6 = 0;
+
+#define LOAD_ROW_INDIRECT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##7; \
+ if(Y_MASK##7 != 0) \
+ BASENAME##7 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##7 * STRIDE_Y)); \
+ else \
+ BASENAME##7 = 0;
+
+#define LOAD_ROW_INDIRECT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##8; \
+ if(Y_MASK##8 != 0) \
+ BASENAME##8 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##8 * STRIDE_Y)); \
+ else \
+ BASENAME##8 = 0;
+
+#define LOAD_ROW_INDIRECT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##9; \
+ if(Y_MASK##9 != 0) \
+ BASENAME##9 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##9 * STRIDE_Y)); \
+ else \
+ BASENAME##9 = 0;
+
+#define LOAD_ROW_INDIRECT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##A; \
+ if(Y_MASK##A != 0) \
+ BASENAME##A = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##A * STRIDE_Y)); \
+ else \
+ BASENAME##A = 0;
+
+#define LOAD_ROW_INDIRECT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##B; \
+ if(Y_MASK##B != 0) \
+ BASENAME##B = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##B * STRIDE_Y)); \
+ else \
+ BASENAME##B = 0;
+
+#define LOAD_ROW_INDIRECT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##C; \
+ if(Y_MASK##C != 0) \
+ BASENAME##C = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##C * STRIDE_Y)); \
+ else \
+ BASENAME##C = 0;
+
+#define LOAD_ROW_INDIRECT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##D; \
+ if(Y_MASK##D != 0) \
+ BASENAME##D = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##D * STRIDE_Y)); \
+ else \
+ BASENAME##D = 0;
+
+#define LOAD_ROW_INDIRECT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##E; \
+ if(Y_MASK##E != 0) \
+ BASENAME##E = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##E * STRIDE_Y)); \
+ else \
+ BASENAME##E = 0;
+
+#define LOAD_ROW_INDIRECT_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ LOAD_ROW_INDIRECT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \
+ VEC_DATA_TYPE(DATA_TYPE, N0) \
+ BASENAME##F; \
+ if(Y_MASK##F != 0) \
+ BASENAME##F = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##F * STRIDE_Y)); \
+ else \
+ BASENAME##F = 0;
+
+/** Load blocks (consecutive rows and columns) with Y offset.
+ * @name LOAD_BLOCK_INDIRECT
+ *
+ * Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16
+ * The data to load is expected to have consecutive names for each row.
+ * E.g., for M0=3, and BASENAME=c, the expected data is c0, c1 and c2.
+ * The Z offset is expected to have consecutive names.
+ * E.g., for M0=3, and Z=zin, the expected Z offsets are zin0, zin1 and zin2.
+ *
+ * @param[in] M0 The number of consecutive rows
+ * @param[in] N0 The number of consecutive columns
+ * @param[in] DATA_TYPE The data type of the target
+ * @param[in] BASENAME The basename of the result variables
+ * @param[in] PTR The base pointer for the data
+ * @param[in] OFFSET The offset within a row
+ * @param[in] STRIDE_Y The stride in y-axis direction
+ * @param[in] Y The y-axis offset vector
+ * @param[in] Y_MASK The y-axis mask vector. If 0, forces BASENAMEn to 0
+ * @{
+ */
+#define LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) LOAD_ROW_INDIRECT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK)
+#define LOAD_BLOCK_INDIRECT(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK)
+
/** Loads the elements from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_ELEMENT_n
*
@@ -624,49 +803,49 @@
* @{
*/
#define CALCULATE_Z_OFFSET_1(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##0 = (0 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##0 = (0 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##0 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##0); \
Z##0 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_2(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_1(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##1 = (1 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##1 = (1 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##1 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##1); \
Z##1 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_3(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_2(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##2 = (2 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##2 = (2 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##2 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##2); \
Z##2 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_4(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_3(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##3 = (3 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##3 = (3 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##3 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##3); \
Z##3 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_5(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_4(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##4 = (4 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##4 = (4 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##4 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##4); \
Z##4 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_6(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_5(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##5 = (5 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##5 = (5 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##5 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##5); \
Z##5 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_7(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_6(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##6 = (6 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##6 = (6 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##6 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##6); \
Z##6 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_8(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_7(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
- Z##7 = (7 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
+ Z##7 = (7 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##7 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##7); \
Z##7 *= (CROSS_PLANE_PAD * STRIDE_Y);
diff --git a/src/core/CL/cl_kernels/gemmlowp.cl b/src/core/CL/cl_kernels/gemmlowp.cl
index 50dda7ef3c..ad92511c22 100644
--- a/src/core/CL/cl_kernels/gemmlowp.cl
+++ b/src/core/CL/cl_kernels/gemmlowp.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,6 @@
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
-/** Specialized macros to perform the dot product instruction between two vectors of size N [1,16]. These macros use the dot8 instruction */
#define ARM_DOT1(a, b, c) \
({ \
ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 3))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 3))0), c); \
diff --git a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
index 8884521794..91ff35b58d 100644
--- a/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDirectConvolutionLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,8 +53,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != weights->dimension(height_idx), "Weights should have same width and height");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
- "Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(channel_idx) != input->dimension(channel_idx),
"Weights feature map dimension should match the respective input's one");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->num_dimensions() > 4, "Weights can be at most 4 dimensional");
@@ -63,6 +61,20 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
&& std::get<0>(conv_info.stride()) > 2,
"Strides larger than 2 not supported for 3x3, 5x5, 9x9 convolution.");
+ if(data_layout == DataLayout::NCHW)
+ {
+ if(is_data_type_quantized(input->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5 && weights->dimension(width_idx) != 9,
+ "Kernel sizes other than 1x1, 3x3, 5x5 or 9x9 are not supported with quantised data types");
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights->dimension(width_idx) != 1 && weights->dimension(width_idx) != 3 && weights->dimension(width_idx) != 5,
+ "Kernel sizes other than 1x1, 3x3 or 5x5 are not supported with float data types");
+ }
+ }
+
if(biases != nullptr)
{
if(is_data_type_quantized_asymmetric(input->data_type()))
@@ -102,8 +114,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *weights,
return Status{};
}
-inline bool can_run_optimized_kernel_for_bifrost(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
- DataType data_type, DataLayout data_layout)
+inline bool can_run_optimized_kernel_for_bifrost_nchw(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
+ DataType data_type, DataLayout data_layout)
{
return gpu_target_is_in(gpu_target,
GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
@@ -115,29 +127,16 @@ inline bool can_run_optimized_kernel_for_bifrost(GPUTarget gpu_target, unsigned
&& (data_layout == DataLayout::NCHW);
}
-inline bool can_run_optimized_kernel_for_bifrost_nhwc(GPUTarget gpu_target, unsigned int conv_stride_x, unsigned int conv_stride_y, unsigned int kernel_size,
- DataType data_type, DataLayout data_layout)
-{
- return gpu_target_is_in(gpu_target,
- GPUTarget::G71, GPUTarget::G72, GPUTarget::G76,
- GPUTarget::G51, GPUTarget::G51BIG, GPUTarget::G51LIT,
- GPUTarget::G52, GPUTarget::G52LIT)
- && (kernel_size == 9)
- && (conv_stride_x == 1) && (conv_stride_y == 1)
- && (data_type == DataType::F32)
- && (data_layout == DataLayout::NHWC);
-}
-
-inline void setup_num_elems(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
- unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
- unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *input)
+inline void setup_num_elems_nchw(unsigned int &num_elems_read_per_iteration_x, unsigned int &num_elems_read_per_iteration_y,
+ unsigned int &num_elems_written_per_iteration_x, unsigned int &num_elems_written_per_iteration_y,
+ unsigned int kernel_size, const PadStrideInfo &conv_info, const GPUTarget target, ITensorInfo *input)
{
const DataType data_type = input->data_type();
const DataLayout data_layout = input->data_layout();
unsigned int conv_stride_x = std::get<0>(conv_info.stride());
unsigned int conv_stride_y = std::get<1>(conv_info.stride());
- const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);
+ const bool run_optimized_bifrost = can_run_optimized_kernel_for_bifrost_nchw(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);
if(run_optimized_bifrost)
{
@@ -174,7 +173,7 @@ inline void setup_num_elems(unsigned int &num_elems_read_per_iteration_x, unsign
}
}
}
- else if(data_layout == DataLayout::NCHW)
+ else
{
num_elems_read_per_iteration_y = kernel_size;
num_elems_written_per_iteration_x = 8;
@@ -253,97 +252,13 @@ inline void setup_num_elems(unsigned int &num_elems_read_per_iteration_x, unsign
ARM_COMPUTE_ERROR("Invalid direct convolution size");
}
}
- else // data_layout == NHWC
- {
- const bool run_optimized_bifrost_nhwc = can_run_optimized_kernel_for_bifrost_nhwc(target, conv_stride_x, conv_stride_y, kernel_size, data_type, data_layout);
-
- num_elems_written_per_iteration_x = 1;
-
- if(run_optimized_bifrost_nhwc)
- {
- num_elems_read_per_iteration_x = 4;
- }
- else
- {
- num_elems_read_per_iteration_x = 1;
- }
-
- switch(kernel_size)
- {
- case 1:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_y = 8;
- num_elems_written_per_iteration_y = 8;
- break;
- case 2:
- num_elems_read_per_iteration_y = 16;
- num_elems_written_per_iteration_y = 8;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 3:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_y = 10;
- num_elems_written_per_iteration_y = 8;
- break;
- case 2:
- num_elems_read_per_iteration_y = 17;
- num_elems_written_per_iteration_y = 8;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 5:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_y = 12;
- num_elems_written_per_iteration_y = 8;
- break;
- case 2:
- num_elems_read_per_iteration_y = 20;
- num_elems_written_per_iteration_y = 8;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- case 9:
- switch(conv_stride_x)
- {
- case 1:
- num_elems_read_per_iteration_y = 16;
- num_elems_written_per_iteration_y = 8;
- break;
- case 2:
- num_elems_read_per_iteration_y = 24;
- num_elems_written_per_iteration_y = 8;
- break;
- default:
- ARM_COMPUTE_ERROR("Invalid convolution stride X");
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Not implemented.");
- break;
- }
- }
}
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *weights, ITensorInfo *output, const PadStrideInfo &conv_info, const GPUTarget target)
{
- const DataLayout data_layout = input->data_layout();
- const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int kernel_size = weights->dimension(width_idx);
+ const DataLayout data_layout = input->data_layout();
- // Get convolved dimensions
+ // Get output shape
TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input, *weights, conv_info);
// Output auto inizialitation if not yet initialized
@@ -352,38 +267,39 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
input->data_type(),
input->quantization_info());
- unsigned int num_elems_read_per_iteration_x = 0;
- unsigned int num_elems_read_per_iteration_y = 0;
- unsigned int num_elems_written_per_iteration_x = 0;
- unsigned int num_elems_written_per_iteration_y = 0;
-
- unsigned int conv_pad_left = conv_info.pad_left();
- unsigned int conv_pad_top = conv_info.pad_top();
- unsigned int conv_stride_x = std::get<0>(conv_info.stride());
- unsigned int conv_stride_y = std::get<1>(conv_info.stride());
-
- setup_num_elems(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
- num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
- kernel_size, conv_info, target, input);
-
- // Create window and update padding
- bool window_changed = false;
- Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
-
if(data_layout == DataLayout::NHWC)
{
- AccessWindowStatic input_access(input, 0, -conv_pad_left,
- ceil_to_multiple(input->dimension(0), num_elems_read_per_iteration_x),
- ceil_to_multiple(input->dimension(1) + conv_info.pad_right(), num_elems_read_per_iteration_y));
- AccessWindowStatic weights_access(weights, 0, 0, ceil_to_multiple(weights->dimension(0), num_elems_read_per_iteration_x), weights->dimension(1));
- AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
- window_changed = update_window_and_padding(win, input_access, weights_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ const unsigned int vec_size = std::min(static_cast<unsigned int>(output->tensor_shape()[0]), 4u);
+
+ // Create window and update padding
+ Window win = calculate_max_window(*output, Steps(vec_size, 1U));
+ output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
+ Status err = Status{};
return std::make_pair(err, win);
}
else if(data_layout == DataLayout::NCHW)
{
+ const int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const unsigned int kernel_size = weights->dimension(width_idx);
+
+ unsigned int num_elems_read_per_iteration_x = 0;
+ unsigned int num_elems_read_per_iteration_y = 0;
+ unsigned int num_elems_written_per_iteration_x = 0;
+ unsigned int num_elems_written_per_iteration_y = 0;
+
+ unsigned int conv_pad_left = conv_info.pad_left();
+ unsigned int conv_pad_top = conv_info.pad_top();
+ unsigned int conv_stride_x = std::get<0>(conv_info.stride());
+ unsigned int conv_stride_y = std::get<1>(conv_info.stride());
+
+ setup_num_elems_nchw(num_elems_read_per_iteration_x, num_elems_read_per_iteration_y,
+ num_elems_written_per_iteration_x, num_elems_written_per_iteration_y,
+ kernel_size, conv_info, target, input);
+
+ // Create window and update padding
+ bool window_changed = false;
+ Window win = calculate_max_window(*output, Steps(num_elems_written_per_iteration_x, num_elems_written_per_iteration_y));
+
AccessWindowRectangle input_access(input, -conv_pad_left, -conv_pad_top, num_elems_read_per_iteration_x, num_elems_read_per_iteration_y, conv_stride_x, conv_stride_y);
AccessWindowStatic weights_access(weights, 0, 0, kernel_size, kernel_size);
AccessWindowRectangle output_access(output, 0, 0, num_elems_written_per_iteration_x, num_elems_written_per_iteration_y);
@@ -419,25 +335,7 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
- _data_layout = input->info()->data_layout();
- const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- const int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
-
- const unsigned int kernel_size = weights->info()->dimension(width_idx);
- const DataType data_type = input->info()->data_type();
-
- // Get convolved dimensions
- TensorShape output_shape = misc::shape_calculator::compute_deep_convolution_shape(*input->info(), *weights->info(), conv_info);
-
- // Output auto inizialitation if not yet initialized
- auto_init_if_empty(*output->info(),
- output_shape,
- 1,
- input->info()->data_type(),
- input->info()->quantization_info());
-
- // Perform validation step
+ // Perform validation
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(),
weights->info(),
(biases != nullptr) ? biases->info() : nullptr,
@@ -446,72 +344,64 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
_conv_stride_x = std::get<0>(conv_info.stride());
_conv_stride_y = std::get<1>(conv_info.stride());
-
- if(_data_layout == DataLayout::NHWC)
- {
- _border_size = BorderSize(conv_info.pad_left(), 0, conv_info.pad_right(), 0);
- }
- else if(_data_layout == DataLayout::NCHW)
- {
- _border_size = BorderSize(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
- }
- else
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
-
- _input = input;
- _weights = weights;
- _output = output;
- _biases = biases;
+ _data_layout = input->info()->data_layout();
+ _input = input;
+ _weights = weights;
+ _output = output;
+ _biases = biases;
+
+ const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
+ const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
+ const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
+ const unsigned int kernel_size = weights->info()->dimension(width_idx);
+ const DataType data_type = input->info()->data_type();
const GPUTarget gpu_target = get_target();
- std::stringstream kernel_name;
- kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
- if(_data_layout == DataLayout::NHWC)
- {
- kernel_name << "_" << lower_string(string_from_data_layout(_data_layout));
- }
-
- CLBuildOptions build_options;
- build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
-
- const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, _data_layout);
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, gpu_target);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
- if(run_optimized_for_bifrost)
- {
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
+ std::stringstream kernel_name;
+ CLBuildOptions build_options;
- kernel_name << "_f32_bifrost";
- _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
- }
- else
+ if(_data_layout == DataLayout::NHWC)
{
- build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
- build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
- build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
- build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x)));
- if(_data_layout == DataLayout::NHWC)
- {
- const bool run_optimized_for_bifrost_nhwc = can_run_optimized_kernel_for_bifrost_nhwc(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, _data_layout);
- build_options.add_option(std::string("-DDATA_LAYOUT_NHWC=1"));
- build_options.add_option(std::string("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(height_idx))));
- build_options.add_option(std::string("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(width_idx))));
- build_options.add_option(std::string("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(height_idx))));
- build_options.add_option(std::string("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(width_idx))));
- build_options.add_option(std::string("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left())));
- build_options.add_option(std::string("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top())));
- build_options.add_option(std::string("-DPAD_BOTTOM=" + support::cpp11::to_string(conv_info.pad_bottom())));
- build_options.add_option(std::string("-DSTRIDE_Y=" + support::cpp11::to_string(_conv_stride_y)));
- if(run_optimized_for_bifrost_nhwc)
- {
- const unsigned int num_elems_read_per_iteration_x = 4;
- _border_size.right = num_elems_read_per_iteration_x;
- build_options.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_read_per_iteration_x));
- }
- }
- build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
+ _border_size = BorderSize();
+
+ kernel_name << "direct_convolution_nhwc";
+
+ const unsigned int n0 = win_config.second.x().step();
+ const unsigned int m0 = win_config.second.y().step();
+ const unsigned int k0 = std::min(static_cast<unsigned int>(_input->info()->dimension(channel_idx)), 16u);
+ const unsigned int partial_store_n0 = _output->info()->dimension(channel_idx) % n0;
+ const unsigned int partial_store_m0 = _output->info()->dimension(channel_idx) % m0;
+ const unsigned int pad_left = conv_info.pad_left();
+ const unsigned int pad_top = conv_info.pad_top();
+
+ build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
+ build_options.add_option_if(_biases != nullptr, std::string("-DBIA_DATA_TYPE=" + get_cl_type_from_data_type(_biases->info()->data_type())));
+ build_options.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(_input->info()->dimension(width_idx)));
+ build_options.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(_input->info()->dimension(height_idx)));
+ build_options.add_option("-DSRC_CHANNELS=" + support::cpp11::to_string(_input->info()->dimension(channel_idx)));
+ build_options.add_option("-DSRC_DATA_TYPE=" + get_cl_type_from_data_type(_input->info()->data_type()));
+ build_options.add_option("-DDST_WIDTH=" + support::cpp11::to_string(_output->info()->dimension(width_idx)));
+ build_options.add_option("-DDST_HEIGHT=" + support::cpp11::to_string(_output->info()->dimension(height_idx)));
+ build_options.add_option("-DDST_CHANNELS=" + support::cpp11::to_string(_output->info()->dimension(channel_idx)));
+ build_options.add_option("-DDST_DATA_TYPE=" + get_cl_type_from_data_type(_output->info()->data_type()));
+ build_options.add_option("-DWEI_WIDTH=" + support::cpp11::to_string(_weights->info()->dimension(width_idx)));
+ build_options.add_option("-DWEI_HEIGHT=" + support::cpp11::to_string(_weights->info()->dimension(height_idx)));
+ build_options.add_option("-DWEI_DATA_TYPE=" + get_cl_type_from_data_type(_weights->info()->data_type()));
+ build_options.add_option("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x));
+ build_options.add_option("-DSTRIDE_Y=" + support::cpp11::to_string(_conv_stride_y));
+ build_options.add_option("-DPAD_LEFT=" + support::cpp11::to_string(pad_left));
+ build_options.add_option("-DPAD_TOP=" + support::cpp11::to_string(pad_top));
+ build_options.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_options.add_option("-DM0=" + support::cpp11::to_string(m0));
+ build_options.add_option("-DK0=" + support::cpp11::to_string(k0));
+ build_options.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0));
+ build_options.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0));
if(is_data_type_quantized(data_type))
{
@@ -523,33 +413,74 @@ void CLDirectConvolutionLayerKernel::configure(const CLCompileContext &compile_c
int output_multiplier = 0;
int output_shift = 0;
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
- build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
- build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
- build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
-
- // Create kernel
- _kernel = create_kernel(compile_context, "direct_convolution_quantized", build_options.options());
-
- // Set static kernel arguments
- unsigned int idx = 3 * num_arguments_per_3D_tensor() + ((_biases != nullptr) ? num_arguments_per_1D_tensor() : 0) + 1;
- _kernel.setArg(idx++, -iqinfo.offset);
- _kernel.setArg(idx++, -wqinfo.offset);
- _kernel.setArg(idx++, oqinfo.offset);
+ build_options.add_option("-DIS_QUANTISED");
+ build_options.add_option("-DDST_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
+ build_options.add_option("-DDST_SHIFT=" + support::cpp11::to_string(output_shift));
+ build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
+ build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
+ build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
+ build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(DataType::S32));
}
else
{
- // Create kernel
- _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
+        build_options.add_option("-DACC_DATA_TYPE=" + get_cl_type_from_data_type(data_type));
+        build_options.add_option("-DSRC_OFFSET=" + support::cpp11::to_string(0));
+        build_options.add_option("-DWEI_OFFSET=" + support::cpp11::to_string(0));
+        build_options.add_option("-DDST_OFFSET=" + support::cpp11::to_string(0));
}
}
+ else
+ {
+ _border_size = BorderSize(conv_info.pad_top(), conv_info.pad_right(), conv_info.pad_bottom(), conv_info.pad_left());
- // Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), weights->info(), output->info(), conv_info, gpu_target);
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- ICLKernel::configure_internal(win_config.second);
+ kernel_name << "direct_convolution" << kernel_size << "x" << kernel_size;
+
+ build_options.add_option_if(_biases != nullptr, std::string("-DHAS_BIAS"));
+
+ const bool run_optimized_for_bifrost = can_run_optimized_kernel_for_bifrost_nchw(gpu_target, _conv_stride_x, _conv_stride_y, kernel_size, data_type, _data_layout);
+
+ if(run_optimized_for_bifrost)
+ {
+ build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
+
+ kernel_name << "_f32_bifrost";
+ }
+ else
+ {
+ build_options.add_option(std::string("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)));
+ build_options.add_option(std::string("-DDATA_SIZE=" + get_data_size_from_data_type(data_type)));
+ build_options.add_option(std::string("-DWEIGHTS_DEPTH=" + support::cpp11::to_string(_weights->info()->dimension(channel_idx))));
+ build_options.add_option(std::string("-DSTRIDE_X=" + support::cpp11::to_string(_conv_stride_x)));
+ build_options.add_option(std::string("-DDATA_TYPE_PROMOTED=" + get_cl_type_from_data_type(data_type)));
+
+ if(is_data_type_quantized(data_type))
+ {
+ const UniformQuantizationInfo iqinfo = _input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = _weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oqinfo = _output->info()->quantization_info().uniform();
+
+ float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
+ int output_multiplier = 0;
+ int output_shift = 0;
+ quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
+ build_options.add_option("-DOUTPUT_MULTIPLIER=" + support::cpp11::to_string(output_multiplier));
+ build_options.add_option("-DOUTPUT_SHIFT=" + support::cpp11::to_string(output_shift));
+ build_options.add_option("-DKERNEL_SIZE=" + support::cpp11::to_string(kernel_size));
+ build_options.add_option("-DINPUT_OFFSET=" + support::cpp11::to_string(-iqinfo.offset));
+ build_options.add_option("-DWEIGHTS_OFFSET=" + support::cpp11::to_string(-wqinfo.offset));
+ build_options.add_option("-DOUTPUT_OFFSET=" + support::cpp11::to_string(oqinfo.offset));
+
+ kernel_name.str("direct_convolution_quantized");
+ }
+ }
+ }
+
+ _kernel = create_kernel(compile_context, kernel_name.str(), build_options.options());
// Set config_id for enabling LWS tuning
- _config_id = "direct_convolution_";
+ _config_id = kernel_name.str();
+ _config_id += "_";
_config_id += lower_string(string_from_data_type(data_type));
_config_id += "_";
_config_id += support::cpp11::to_string(kernel_size);
@@ -588,38 +519,58 @@ void CLDirectConvolutionLayerKernel::run(const Window &window, cl::CommandQueue
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
// Get initial windows
- Window slice = window.first_slice_window_3D();
- Window win_in = window;
+ Window slice = window.first_slice_window_3D();
- win_in.adjust(Window::DimX, -_border_size.left, true);
- win_in.adjust(Window::DimY, -_border_size.top, true);
+ if(_data_layout == DataLayout::NHWC)
+ {
+ slice.set(Window::DimY, Window::Dimension(0, _output->info()->dimension(1) * _output->info()->dimension(2), 1));
+ slice.set(Window::DimZ, Window::Dimension(0, _output->info()->dimension(3), 1));
- const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
- const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ add_3D_tensor_argument(idx, _output, slice);
+ add_3D_tensor_argument(idx, _weights, slice);
+ if(_biases != nullptr)
+ {
+ add_1D_tensor_argument(idx, _biases, slice);
+ }
+ _kernel.setArg(idx++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
+ enqueue(queue, *this, slice, lws_hint());
+ }
+ else
+ {
+ Window win_in = window;
- win_in.set_dimension_step(width_idx, window[width_idx].step() * _conv_stride_x);
- win_in.set_dimension_step(height_idx, window[height_idx].step() * _conv_stride_y);
+ win_in.adjust(Window::DimX, -_border_size.left, true);
+ win_in.adjust(Window::DimY, -_border_size.top, true);
- Window slice_in = win_in.first_slice_window_3D();
- unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
- add_3D_tensor_argument(idx1, _weights, slice);
+ const int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
+ const int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
- if(_biases != nullptr)
- {
- Window slice_biases;
- slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
- add_1D_tensor_argument(idx1, _biases, slice_biases);
- }
+ win_in.set_dimension_step(width_idx, window[width_idx].step() * _conv_stride_x);
+ win_in.set_dimension_step(height_idx, window[height_idx].step() * _conv_stride_y);
- _kernel.setArg(idx1++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
+ Window slice_in = win_in.first_slice_window_3D();
+ unsigned int idx1 = 2 * num_arguments_per_3D_tensor();
+ add_3D_tensor_argument(idx1, _weights, slice);
- do
- {
- unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
- add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice, lws_hint());
+ if(_biases != nullptr)
+ {
+ Window slice_biases;
+ slice_biases.use_tensor_dimensions(_biases->info()->tensor_shape());
+ add_1D_tensor_argument(idx1, _biases, slice_biases);
+ }
+
+ _kernel.setArg(idx1++, static_cast<unsigned int>(_weights->info()->strides_in_bytes()[3]));
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice_in);
+ add_3D_tensor_argument(idx, _output, slice);
+ enqueue(queue, *this, slice, lws_hint());
+ }
+ while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
}
- while(window.slide_window_slice_3D(slice) && win_in.slide_window_slice_3D(slice_in));
}
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLConvolutionLayer.cpp b/src/runtime/CL/functions/CLConvolutionLayer.cpp
index 5bfbc7ce57..b1351f6747 100644
--- a/src/runtime/CL/functions/CLConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -198,21 +198,42 @@ ConvolutionMethod CLConvolutionLayer::get_convolution_method(const ITensorInfo *
}
else
{
- // SRGAN
- if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
- && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
+ if(input->data_layout() == DataLayout::NCHW)
{
- return ConvolutionMethod::DIRECT;
+ // SRGAN
+ if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
+ && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
+ {
+ return ConvolutionMethod::DIRECT;
+ }
+ if((weights->dimension(idx_h) > 5) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)))
+ {
+ return ConvolutionMethod::FFT;
+ }
+ if(input->dimension(idx_c) < 16)
+ {
+ return ConvolutionMethod::GEMM;
+ }
+ return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
}
- if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLFFTConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)))
+ else
{
- return ConvolutionMethod::FFT;
+ // SRGAN
+ if((input->dimension(idx_h) > 720U) && (output->dimension(idx_h) > 720U) && (weights->dimension(idx_h) == 9) && (conv_info.pad_top() < 3)
+ && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
+ {
+ return ConvolutionMethod::DIRECT;
+ }
+ if((weights->dimension(idx_h) > 7) && (input->dimension(idx_c) > output->dimension(idx_c)) && (CLDirectConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info)))
+ {
+ return ConvolutionMethod::DIRECT;
+ }
+ if(input->dimension(idx_c) < 16)
+ {
+ return ConvolutionMethod::GEMM;
+ }
+ return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
}
- if(input->dimension(idx_c) < 16)
- {
- return ConvolutionMethod::GEMM;
- }
- return bool(CLWinogradConvolutionLayer::validate(input, weights, nullptr, output, conv_info, act_info, enable_fast_math)) ? ConvolutionMethod::WINOGRAD : ConvolutionMethod::GEMM;
}
}