path: root/src/core/GLES_COMPUTE/cs_shaders
Diffstat (limited to 'src/core/GLES_COMPUTE/cs_shaders')
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/absdiff.cs                        60
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs              150
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h    130
-rwxr-xr-x  src/core/GLES_COMPUTE/cs_shaders/arithmetic_add.cs                 65
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs      266
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/concatenate.cs                    72
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs             791
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs      316
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs        1057
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs        1155
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs         225
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/dropout.cs                       148
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/fill_border.cs                   498
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/gemm.cs                         1130
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/helpers_cs.h                     498
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/normalization_layer.cs           106
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/normalize_planar_yuv_layer.cs     99
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/pixelwise_mul_float.cs            58
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/pooling_layer.cs                1052
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/scale.cs                         136
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/softmax_layer.cs                 363
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/tensor_shift.cs                  134
-rwxr-xr-x  src/core/GLES_COMPUTE/cs_shaders/transpose.cs                     234
23 files changed, 0 insertions, 8743 deletions
diff --git a/src/core/GLES_COMPUTE/cs_shaders/absdiff.cs b/src/core/GLES_COMPUTE/cs_shaders/absdiff.cs
deleted file mode 100644
index c5196a14dc..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/absdiff.cs
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-/** Calculate the absolute difference of two input images.
- *
- * @param[in] src1_ptr Pointer to the first source image. Supported data types: U8
- * @param[in] src1_attrs The attributes of the first source image
- * @param[in] src2_ptr Pointer to the second source image. Supported data types: Same as @p src1_ptr
- * @param[in] src2_attrs The attributes of the second source image
- * @param[out] dst_ptr Pointer to the destination image. Supported data types: Same as @p src1_ptr
- * @param[in] dst_attrs The attributes of the destination image
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src1_attrs;
- ImageAttributes src2_attrs;
- ImageAttributes dst_attrs;
-};
-
-TENSOR_DECLARATION(1, src1Buffer, uint, src1_ptr, src1_shift, 2, readonly);
-TENSOR_DECLARATION(2, src2Buffer, uint, src2_ptr, src2_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR(src1_attrs, src1_shift);
- ImageIterator src2_iter = CONVERT_TO_IMAGE_ITERATOR(src2_attrs, src2_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- lowp uvec4 tmp1 = LOAD_UNPACK4_CURRENT_ITEM_U8(src1_ptr, src1_iter);
- lowp uvec4 tmp2 = LOAD_UNPACK4_CURRENT_ITEM_U8(src2_ptr, src2_iter);
- lowp uvec4 diff = uvec4(abs(ivec4(tmp1 - tmp2)));
-
- STORE_PACK4_CURRENT_ITEM_U8(dst_ptr, dst_iter, diff);
-}
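
For reference, the removed absdiff.cs computes dst = |src1 - src2| on U8 data, four packed pixels per invocation. A minimal scalar sketch of the same arithmetic in C (illustrative only, not library code):

#include <stddef.h>
#include <stdint.h>

/* Per-pixel reference for absdiff.cs: dst = |src1 - src2| on U8 data.
 * The shader processes four pixels at once through a packed uint;
 * this keeps the arithmetic scalar for clarity. */
static void absdiff_u8_ref(const uint8_t *src1, const uint8_t *src2,
                           uint8_t *dst, size_t count)
{
    for (size_t i = 0; i < count; ++i)
    {
        int d = (int)src1[i] - (int)src2[i];
        dst[i] = (uint8_t)(d < 0 ? -d : d);
    }
}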
diff --git a/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs
deleted file mode 100644
index 983b31deba..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/activation_layer.cs
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2017-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "activation_layer_helpers_cs.h"
-#include "helpers_cs.h"
-
-/** This performs an activation function on floating point inputs.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note Activation function should be given as a preprocessor argument using "#define act_name". e.g. "#define TANH"
- * @note A, B variables required by some activation functions are set using A_VAL= and B_VAL= respectively.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
-};
-
-#ifdef DATA_TYPE_FP32
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float data = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- float data_out = 0.f;
- // Perform activation
-#ifdef LOGISTIC
- data_out = logistic_op(data);
-#elif defined(TANH) /*TANH*/
- data_out = tanh_op(data);
-#elif defined(RELU) /*RELU*/
- data_out = relu_op(data);
-#elif defined(BRELU) /*BRELU*/
- data_out = brelu_op(data);
-#elif defined(LU_BRELU) /*LU_BRELU*/
- data_out = lu_brelu_op(data);
-#elif defined(LRELU) /*LRELU*/
- data_out = lrelu_op(data);
-#elif defined(SRELU) /*SRELU*/
- data_out = srelu_op(data);
-#elif defined(ELU) /*ELU*/
- data_out = elu_op(data);
-#elif defined(ABS) /*ABS*/
- data_out = abs_op(data);
-#elif defined(SQUARE) /*SQUARE*/
- data_out = square_op(data);
-#elif defined(SQRT) /*SQRT*/
- data_out = sqrt_op(data);
-#elif defined(LINEAR) /*LINEAR*/
- data_out = linear_op(data);
-#elif defined(IDENTITY) /*IDENTITY*/
- data_out = identity_op(data);
-#else /*LOGISTIC*/
-#error Activation function not provided
-#endif /*LOGISTIC*/
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, data_out);
-}
-
-#elif defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec2 data = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- // Perform activation
- float a = data.x;
- float b = data.y;
- vec2 data_out;
-#ifdef LOGISTIC /*LOGISTIC*/
- data_out.x = logistic_op(a);
- data_out.y = logistic_op(b);
-#elif defined(TANH) /*TANH*/
- data_out.x = tanh_op(a);
- data_out.y = tanh_op(b);
-#elif defined(RELU) /*RELU*/
- data_out.x = relu_op(a);
- data_out.y = relu_op(b);
-#elif defined(BRELU) /*BRELU*/
- data_out.x = brelu_op(a);
- data_out.y = brelu_op(b);
-#elif defined(LU_BRELU) /*LU_BRELU*/
- data_out.x = lu_brelu_op(a);
- data_out.y = lu_brelu_op(b);
-#elif defined(LRELU) /*LRELU*/
- data_out.x = lrelu_op(a);
- data_out.y = lrelu_op(b);
-#elif defined(SRELU) /*SRELU*/
- data_out.x = srelu_op(a);
- data_out.y = srelu_op(b);
-#elif defined(ELU) /*ELU*/
- data_out.x = elu_op(a);
- data_out.y = elu_op(b);
-#elif defined(ABS) /*ABS*/
- data_out.x = abs_op(a);
- data_out.y = abs_op(b);
-#elif defined(SQUARE) /*SQUARE*/
- data_out.x = square_op(a);
- data_out.y = square_op(b);
-#elif defined(SQRT) /*SQRT*/
- data_out.x = sqrt_op(a);
- data_out.y = sqrt_op(b);
-#elif defined(LINEAR) /*LINEAR*/
- data_out.x = linear_op(a);
- data_out.y = linear_op(b);
-#elif defined(IDENTITY) /*IDENTITY*/
- data_out.x = identity_op(a);
- data_out.y = identity_op(b);
-#else /*LOGISTIC*/
-#error Activation function not provided
-#endif /*LOGISTIC*/
-
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, data_out);
-}
-#endif /*DATA_TYPE_FP16*/
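
The activation kernel above is specialized entirely at shader-compile time: the host prepends the DATA_TYPE_*, activation-name and A_VAL/B_VAL defines to the GLSL source before compiling it. A hedged sketch of assembling such a header in C (the function name and layout are illustrative, not the library's actual host code):

#include <stdio.h>

/* Builds the preprocessor header the activation shader expects; the
 * caller would prepend the result to the GLSL source before compiling.
 * act_name is one of the condition names above, e.g. "RELU" or "TANH". */
static int build_activation_header(char *buf, size_t n, const char *act_name,
                                   float a_val, float b_val)
{
    return snprintf(buf, n,
                    "#define %s\n"
                    "#define DATA_TYPE_FP32\n"
                    "#define A_VAL %f\n"
                    "#define B_VAL %f\n",
                    act_name, a_val, b_val);
}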
diff --git a/src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h b/src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h
deleted file mode 100644
index e353b744ea..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/activation_layer_helpers_cs.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2018-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef DATA_TYPE_FP32
-precision highp float;
-#elif defined(DATA_TYPE_FP16)
-#if defined(LOGISTIC) || defined(TANH) || defined(SRELU) || defined(SQRT)
-precision highp float;
-#else /*LOGISTIC_TANH_SRELU_SQRT*/
-precision mediump float;
-#endif /*LOGISTIC_TANH_SRELU_SQRT*/
-#endif /*DATA_TYPE_FP32*/
-
-#define ABS_OP(a) abs((a))
-#define ADD_OP(a, b) ((a) + (b))
-#define SUB_OP(a, b) ((a) - (b))
-#define MUL_OP(a, b) ((a) * (b))
-#define MLA_OP(a, b, c) ((b) * (c) + (a))
-#define DIV_OP(a, b) ((a) / (b))
-#define EXP_OP(a) exp((a))
-#define LOG_OP(a) log((a))
-#define SQRT_OP(a) sqrt((a))
-#define CONST_ONE (1.f)
-
-// Logistic Activation
-float logistic_op(float x)
-{
- return DIV_OP(CONST_ONE, ADD_OP(CONST_ONE, EXP_OP(-x)));
-}
-vec4 logistic_op(vec4 x)
-{
- return DIV_OP(vec4(CONST_ONE), ADD_OP(CONST_ONE, EXP_OP(-x)));
-}
-// Hyperbolic Tangent Activation
-float tanh_op(float x)
-{
- float tmp = float(B_VAL) * x;
- if(tmp > 10.f)
- {
- return MUL_OP(float(A_VAL), 1.f);
- }
- else if(tmp < -10.f)
- {
- return MUL_OP(float(A_VAL), -1.f);
- }
- else
- {
- return MUL_OP(float(A_VAL), tanh(tmp + 0.000001f));
- }
-}
-// RELU Activation
-float relu_op(float x)
-{
- return max(0.f, x);
-}
-vec4 relu_op(vec4 x)
-{
- return max(vec4(0.f), x);
-}
-// Bounded RELU Activation
-float brelu_op(float x)
-{
- return min(float(A_VAL), max(float(0.0), x));
-}
-// Lower Upper Bounded RELU Activation
-float lu_brelu_op(float x)
-{
- return min(max(x, float(B_VAL)), float(A_VAL));
-}
-// Leaky RELU Activation
-float lrelu_op(float x)
-{
- return (x > float(0.0)) ? x : MUL_OP(float(A_VAL), x);
-}
-// Soft RELU Activation
-float srelu_op(float x)
-{
- return LOG_OP(ADD_OP(CONST_ONE, EXP_OP(x)));
-}
-// ELU Activation
-float elu_op(float x)
-{
- return (x >= float(0.0)) ? x : MUL_OP(float(A_VAL), SUB_OP(EXP_OP(x), CONST_ONE));
-}
-// Absolute Activation
-float abs_op(float x)
-{
- return ABS_OP(x);
-}
-// Square Activation
-float square_op(float x)
-{
- return MUL_OP(x, x);
-}
-// Square-root Activation
-float sqrt_op(float x)
-{
- return SQRT_OP(x);
-}
-// Linear Activation
-float linear_op(float x)
-{
- return MLA_OP(float(B_VAL), float(A_VAL), x);
-}
-
-// Identity Activation
-float identity_op(float x)
-{
- return x;
-}
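
For clarity, here is the scalar math behind several of these helpers as a plain-C reference, with a_val/b_val standing in for the A_VAL/B_VAL compile-time constants (illustrative only, not library code):

#include <math.h>

/* C reference for a subset of the activations defined above. */
static float logistic_ref(float x) { return 1.0f / (1.0f + expf(-x)); }
static float brelu_ref(float x, float a_val) { return fminf(a_val, fmaxf(0.0f, x)); }
static float lu_brelu_ref(float x, float a_val, float b_val) { return fminf(fmaxf(x, b_val), a_val); }
static float lrelu_ref(float x, float a_val) { return (x > 0.0f) ? x : a_val * x; }
static float elu_ref(float x, float a_val) { return (x >= 0.0f) ? x : a_val * (expf(x) - 1.0f); }
static float linear_ref(float x, float a_val, float b_val) { return a_val * x + b_val; }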
diff --git a/src/core/GLES_COMPUTE/cs_shaders/arithmetic_add.cs b/src/core/GLES_COMPUTE/cs_shaders/arithmetic_add.cs
deleted file mode 100755
index faaf204c62..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/arithmetic_add.cs
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2016-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-precision mediump float;
-#define ADD(x, y) (x) + (y)
-
-/** This function adds two tensors.
- *
- * @param[in] src1_ptr Pointer to the first source tensor. Supported data types: F16
- * @param[in] src1_attrs The attributes of the first source tensor
- * @param[in] src2_ptr Pointer to the second source tensor. Supported data types: Same as @p src1_ptr
- * @param[in] src2_attrs The attributes of the second source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: Same as @p src1_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src1_attrs;
- Tensor3DAttributes src2_attrs;
- Tensor3DAttributes dst_attrs;
-};
-
-TENSOR_DECLARATION(1, src1Buffer, uvec4, src1_ptr, src1_shift, 4, readonly);
-TENSOR_DECLARATION(2, src2Buffer, uvec4, src2_ptr, src2_shift, 4, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src1_iter = CONVERT_TO_TENSOR3D_ITERATOR(src1_attrs, src1_shift);
- Tensor3DIterator src2_iter = CONVERT_TO_TENSOR3D_ITERATOR(src2_attrs, src2_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec4 tmp1[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src1_ptr, src1_iter);
- vec4 tmp2[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src2_ptr, src2_iter);
- vec4 addition[2];
- addition[0] = ADD(tmp1[0], tmp2[0]);
- addition[1] = ADD(tmp1[1], tmp2[1]);
-
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, addition);
-}
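
The FP16 path above never manipulates half floats directly: each 32-bit word carries two IEEE 754 binary16 values, which unpackHalf2x16 expands to floats before the add. A self-contained C sketch of that decode (illustrative; the GPU does this natively):

#include <stdint.h>
#include <string.h>

/* Decode one IEEE 754 binary16 value, handling zero, subnormals,
 * infinities and NaN. This mirrors what the shader's LOAD_UNPACK
 * macros (via unpackHalf2x16) do for each 16-bit lane. */
static float half_to_float(uint16_t h)
{
    uint32_t sign = (uint32_t)(h & 0x8000u) << 16;
    uint32_t exp  = (h >> 10) & 0x1Fu;
    uint32_t mant = h & 0x3FFu;
    uint32_t bits;

    if (exp == 0u)
    {
        if (mant == 0u)
        {
            bits = sign; /* signed zero */
        }
        else
        {
            /* subnormal: renormalize into the float exponent range */
            exp = 127u - 15u + 1u;
            while ((mant & 0x400u) == 0u)
            {
                mant <<= 1;
                exp--;
            }
            mant &= 0x3FFu;
            bits = sign | (exp << 23) | (mant << 13);
        }
    }
    else if (exp == 31u)
    {
        bits = sign | 0x7F800000u | (mant << 13); /* inf / NaN */
    }
    else
    {
        bits = sign | ((exp - 15u + 127u) << 23) | (mant << 13);
    }

    float f;
    memcpy(&f, &bits, sizeof f);
    return f;
}

/* Two halves per 32-bit word, as in the shader's packed loads. */
static void add_two_halves(uint32_t a, uint32_t b, float out[2])
{
    out[0] = half_to_float((uint16_t)(a & 0xFFFFu)) + half_to_float((uint16_t)(b & 0xFFFFu));
    out[1] = half_to_float((uint16_t)(a >> 16)) + half_to_float((uint16_t)(b >> 16));
}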
diff --git a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
deleted file mode 100644
index f38a90b947..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif /*DATA_TYPE_FP16*/
-
-#define ADD_OP(a, b) ((a) + (b))
-#define SUB_OP(a, b) ((a) - (b))
-#define MUL_OP(a, b) ((a) * (b))
-#define INVSQRT_OP(a) inversesqrt((a))
-#define SQCVT_SAT(a) (a)
-
-#if defined(LU_BRELU)
-#define ACTIVATION_FUNC(x) min(max(x, float(B_VAL)), float(A_VAL))
-#elif defined(BRELU)
-#define ACTIVATION_FUNC(x) min(max(x, float(0)), float(A_VAL))
-#elif defined(RELU)
-#define ACTIVATION_FUNC(x) max(x, float(0))
-#else /* defined(FUSED_ACT) */
-#define ACTIVATION_FUNC(x) (x)
-#endif /* defined(FUSED_ACT) */
-
-/** Apply batch normalization.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note Epsilon parameter in the batch normalization equation should be given as a preprocessor argument using "#define EPSILON". e.g. "#define EPSILON 0.1"
- * @note Beta is optional with default value of 0. If not provided, the preprocessor argument "USE_DEFAULT_BETA" should be given
- * @note Gamma is optional with default value of 1. If not provided, the preprocessor argument "USE_DEFAULT_GAMMA" should be given
- *
- * @param[in] src_ptr Pointer to the first source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p src_ptr
- * @param[in] mean_attrs The attributes of the mean tensor
- * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p src_ptr
- * @param[in] var_attrs The attributes of the var tensor
- * @param[in] beta_ptr (Optional) Pointer to the beta source tensor. If not provided, default value of beta is 0. Supported data types: same as @p src_ptr
- * @param[in] beta_attrs (Optional) The attributes of the beta tensor
- * @param[in] gamma_ptr (Optional) Pointer to the gamma source tensor. If not provided, default value of gamma is 1. Supported data types: same as @p src_ptr
- * @param[in] gamma_attrs (Optional) The attributes of the gamma tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- VectorAttributes mean_attrs;
- VectorAttributes var_attrs;
-#ifndef USE_DEFAULT_BETA
- VectorAttributes beta_attrs;
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- VectorAttributes gamma_attrs;
-#endif /* USE_DEFAULT_GAMMA */
-};
-
-#ifdef DATA_TYPE_FP32
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-TENSOR_DECLARATION(3, meanBuffer, float, mean_ptr, mean_shift, 2, readonly);
-TENSOR_DECLARATION(4, varBuffer, float, var_ptr, var_shift, 2, readonly);
-#ifndef USE_DEFAULT_BETA
-TENSOR_DECLARATION(5, betaBuffer, float, beta_ptr, beta_shift, 2, readonly);
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
-#ifdef USE_DEFAULT_BETA
-TENSOR_DECLARATION(5, gammaBuffer, float, gamma_ptr, gamma_shift, 2, readonly);
-#else /* USE_DEFAULT_BETA */
-TENSOR_DECLARATION(6, gammaBuffer, float, gamma_ptr, gamma_shift, 2, readonly);
-#endif /* USE_DEFAULT_BETA */
-#endif /* USE_DEFAULT_GAMMA */
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
- VectorIterator mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
- VectorIterator var_iter = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
-#ifndef USE_DEFAULT_BETA
- VectorIterator beta_iter = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- VectorIterator gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
-#endif /* USE_DEFAULT_GAMMA */
-
- float input_value = 0.f;
- float denominator = 0.f;
- float numerator = 0.f;
- float x_bar = 0.f;
-
- uint current_slice = gl_GlobalInvocationID.z;
-
- input_value = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- denominator = LOAD(var_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(var_iter, current_slice * var_attrs.stride_x));
- denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
-
- // Calculate x bar and store results
- numerator = LOAD(mean_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(mean_iter, current_slice * mean_attrs.stride_x));
- numerator = SUB_OP(input_value, numerator);
- x_bar = MUL_OP(numerator, denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- float gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * gamma_attrs.stride_x));
-
- x_bar = MUL_OP(gamma_param, x_bar);
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- float beta_param = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
-
- x_bar = ADD_OP(x_bar, beta_param);
-#endif /* USE_DEFAULT_BETA */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(x_bar));
-}
-
-#elif defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, meanBuffer, uvec2, mean_ptr, mean_shift, 3, readonly);
-TENSOR_DECLARATION(4, varBuffer, uvec2, var_ptr, var_shift, 3, readonly);
-#ifndef USE_DEFAULT_BETA
-TENSOR_DECLARATION(5, betaBuffer, uvec2, beta_ptr, beta_shift, 3, readonly);
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
-#ifdef USE_DEFAULT_BETA
-TENSOR_DECLARATION(5, gammaBuffer, uvec2, gamma_ptr, gamma_shift, 3, readonly);
-#else /* USE_DEFAULT_BETA */
-TENSOR_DECLARATION(6, gammaBuffer, uvec2, gamma_ptr, gamma_shift, 3, readonly);
-#endif /* USE_DEFAULT_BETA */
-#endif /* USE_DEFAULT_GAMMA */
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
- VectorIterator mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
- VectorIterator var_iter = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
-#ifndef USE_DEFAULT_BETA
- VectorIterator beta_iter = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
-#endif /* USE_DEFAULT_BETA */
-#ifndef USE_DEFAULT_GAMMA
- VectorIterator gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
-#endif /* USE_DEFAULT_GAMMA */
-
- vec4 unpacked_s[5];
- float denominator;
- float numerator;
- float gamma_param = 1.f;
- float beta_param = 0.f;
- vec4 x_bar;
- vec4 result;
-
- uint current_slice = gl_GlobalInvocationID.z;
- unpacked_s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- unpacked_s[1] = LOAD_UNPACK4_HALF(var_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(var_iter, current_slice * var_attrs.stride_x));
- unpacked_s[2] = LOAD_UNPACK4_HALF(mean_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(mean_iter, current_slice * mean_attrs.stride_x));
-#ifndef USE_DEFAULT_GAMMA
- unpacked_s[3] = LOAD_UNPACK4_HALF(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * gamma_attrs.stride_x));
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- unpacked_s[4] = LOAD_UNPACK4_HALF(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
-#endif /* USE_DEFAULT_BETA */
-
- if((current_slice % uint(4)) == uint(0))
- {
- denominator = unpacked_s[1].x;
- denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
-
- // Calculate x bar
- numerator = unpacked_s[2].x;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- gamma_param = unpacked_s[3].x;
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- beta_param = unpacked_s[4].x;
-#endif /* USE_DEFAULT_BETA */
- }
- else if((current_slice % uint(4)) == uint(1))
- {
- denominator = unpacked_s[1].y;
- denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
-
- // Calculate x bar
- numerator = unpacked_s[2].y;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- gamma_param = unpacked_s[3].y;
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- beta_param = unpacked_s[4].y;
-#endif /* USE_DEFAULT_BETA */
- }
- else if((current_slice % uint(4)) == uint(2))
- {
- denominator = unpacked_s[1].z;
- denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
-
- // Calculate x bar
- numerator = unpacked_s[2].z;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- gamma_param = unpacked_s[3].z;
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- beta_param = unpacked_s[4].z;
-#endif /* USE_DEFAULT_BETA */
- }
- else
- {
- denominator = unpacked_s[1].w;
- denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
-
- // Calculate x bar
- numerator = unpacked_s[2].w;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
-
-#ifndef USE_DEFAULT_GAMMA
- gamma_param = unpacked_s[3].w;
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- beta_param = unpacked_s[4].w;
-#endif /* USE_DEFAULT_BETA */
- }
-
-#ifndef USE_DEFAULT_GAMMA
- x_bar = MUL_OP(gamma_param, x_bar);
-#endif /* USE_DEFAULT_GAMMA */
-#ifndef USE_DEFAULT_BETA
- x_bar = ADD_OP(x_bar, beta_param);
-#endif /* USE_DEFAULT_BETA */
-
- result = ACTIVATION_FUNC(x_bar);
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
-}
-#endif /*DATA_TYPE_FP16*/
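
Stripped of the packing and the per-slice component selection, the per-element math of this kernel is the standard batch-normalization equation; the kernel then applies ACTIVATION_FUNC to the result. A scalar C reference (illustrative only), with the USE_DEFAULT_BETA/USE_DEFAULT_GAMMA defaults passed explicitly:

#include <math.h>

/* out = gamma * (x - mean) / sqrt(var + epsilon) + beta.
 * Pass gamma = 1.0f and beta = 0.0f to model the USE_DEFAULT_GAMMA
 * and USE_DEFAULT_BETA compile-time defaults. */
static float batchnorm_ref(float x, float mean, float var, float epsilon,
                           float gamma, float beta)
{
    float x_bar = (x - mean) / sqrtf(var + epsilon);
    return gamma * x_bar + beta;
}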
diff --git a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs b/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs
deleted file mode 100644
index d1d1a8632f..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/concatenate.cs
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2017-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif /*DATA_TYPE_FP16*/
-
-/** This kernel concatenates the input tensor into the output tensor along the third dimension
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
-};
-
-#ifdef DATA_TYPE_FP32
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp);
-}
-
-#elif defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- uvec2 tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp);
-}
-#endif /*DATA_TYPE_FP16*/
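
Note that the kernel itself is a plain element copy: the host computes each input's depth offset into the output and binds the destination iterator accordingly. A sketch of that bookkeeping in C (tightly packed layout assumed for illustration; the real tensors carry per-dimension strides):

#include <stddef.h>
#include <string.h>

/* Depth-axis concatenation: copy one width x height x src_depth input
 * block into the output at a running z offset. */
static void concat_depth_ref(const float *src, float *dst,
                             size_t width, size_t height, size_t src_depth,
                             size_t depth_offset)
{
    size_t plane = width * height;
    memcpy(dst + depth_offset * plane, src, src_depth * plane * sizeof(float));
}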
diff --git a/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
deleted file mode 100644
index d40cbbbaf0..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/convolution_layer.cs
+++ /dev/null
@@ -1,791 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-#ifdef RESHAPE_TO_COLUMNS
-
-/** This kernel performs a reshaping of the input tensor to a tensor used to perform convolution using GEMM.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note In case biases will be added to the convolution, "#define HAS_BIAS" has to be passed so that the final matrix is appended with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- * @param[in] width The width of the input tensor
- * @param[in] height The height of the input tensor
- * @param[in] depth The depth of the input tensor
- * @param[in] total_filters Total number of filters. 4th dimension of the weights matrix
- */
-
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- ImageAttributes dst_attrs;
-#ifdef HAS_BIAS
- VectorAttributes biases_attrs;
-#endif /* HAS_BIAS */
- uint width;
- uint height;
- uint depth;
- uint total_filters;
-};
-
-#if defined(DATA_TYPE_FP32)
-
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-#ifdef HAS_BIAS
-TENSOR_DECLARATION(3, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* HAS_BIAS */
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-#ifdef HAS_BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* HAS_BIAS */
-
- bool is_last_thread = (((int(gl_GlobalInvocationID.x)) == (int(gl_NumWorkGroups.x * gl_WorkGroupSize.x) - 1)) && ((int(gl_GlobalInvocationID.y)) == (int(gl_NumWorkGroups.y * gl_WorkGroupSize.y) - 1))
- && ((int(gl_GlobalInvocationID.z)) == (int(gl_NumWorkGroups.z * gl_WorkGroupSize.z) - 1)));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, ((uint(gl_GlobalInvocationID.x) * uint(dst_attrs.stride_y)) + (uint(gl_GlobalInvocationID.y) * uint(width) * uint(dst_attrs.stride_y)) + (uint(
- gl_GlobalInvocationID.z)
- * uint(width) * uint(height) * uint(dst_attrs.stride_y))));
- // Linearize convolution elements
- if(is_last_thread)
- {
- for(uint i = 0u; i < uint(total_filters); ++i)
- {
- float s0 = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, s0);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
-#ifdef HAS_BIAS
- float b = LOAD_CURRENT_ITEM(biases_ptr, biases_iter);
- STORE(dst_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_y), b);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(biases_iter, biases_attrs.stride_x);
-#endif /* HAS_BIAS */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_x);
- }
- }
- else
- {
- for(uint i = 0u; i < uint(total_filters); ++i)
- {
- float s0 = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, s0);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_x);
- }
- }
-}
-
-#elif defined(DATA_TYPE_FP16)
-
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-#ifdef HAS_BIAS
-TENSOR_DECLARATION(3, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* HAS_BIAS */
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-#ifdef HAS_BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* HAS_BIAS */
-
- bool is_last_thread = (((int(gl_GlobalInvocationID.x)) == (int(gl_NumWorkGroups.x * gl_WorkGroupSize.x) - 1)) && ((int(gl_GlobalInvocationID.y)) == (int(gl_NumWorkGroups.y * gl_WorkGroupSize.y) - 1))
- && ((int(gl_GlobalInvocationID.z)) == (int(gl_NumWorkGroups.z * gl_WorkGroupSize.z) - 1)));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, ((uint(gl_GlobalInvocationID.x) * uint(dst_attrs.stride_y)) + (uint(gl_GlobalInvocationID.y) * uint(width) * uint(dst_attrs.stride_y)) + (uint(
- gl_GlobalInvocationID.z)
- * uint(width) * uint(height) * uint(dst_attrs.stride_y))));
- // Linearize convolution elements
- if(is_last_thread)
- {
- for(uint i = 0u; i < uint(total_filters); i = i + 2u)
- {
- vec2 s0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- vec2 s;
- if(int(CURRENT_ITEM_OFFSET_IN_BYTES(src_iter) >> 1u) % 2 == 0)
- {
- s.x = s0.x;
- }
- else
- {
- s.x = s0.y;
- }
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
-
- vec2 s1 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- if(int(CURRENT_ITEM_OFFSET_IN_BYTES(src_iter) >> 1u) % 2 == 0)
- {
- s.y = s1.x;
- }
- else
- {
- s.y = s1.y;
- }
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, s);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
-#ifdef HAS_BIAS
- vec2 b = LOAD_UNPACK2_CURRENT_ITEM_HALF(biases_ptr, biases_iter);
- STORE_PACK2_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_y), b);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(biases_iter, (2u * biases_attrs.stride_x));
-#endif /* HAS_BIAS */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (2u * dst_attrs.stride_x));
- }
- }
- else
- {
- for(uint i = 0u; i < uint(total_filters); i = i + 2u)
- {
- vec2 s0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- vec2 s;
- if(int(CURRENT_ITEM_OFFSET_IN_BYTES(src_iter) >> 1u) % 2 == 0)
- {
- s.x = s0.x;
- }
- else
- {
- s.x = s0.y;
- }
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
-
- vec2 s1 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- if(int(CURRENT_ITEM_OFFSET_IN_BYTES(src_iter) >> 1u) % 2 == 0)
- {
- s.y = s1.x;
- }
- else
- {
- s.y = s1.y;
- }
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, s);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (depth * src_attrs.stride_z));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (2u * dst_attrs.stride_x));
- }
- }
-}
-
-#endif /* DATA_TYPE_FP32 */
-#endif // RESHAPE_TO_COLUMNS
-
-#ifdef IM2COL_GENERIC
-
-/** This kernel performs a reshaping of the input tensor to a tensor used to perform convolution using GEMM.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_FP32"
- * @note PAD_LEFT/PAD_RIGHT/PAD_TOP/PAD_BOTTOM must be passed for padding info, e.g. "#define PAD_LEFT xxx"
- * @note KERNEL_WIDTH/KERNEL_HEIGHT/KERNEL_DEPTH must be passed for kernel dimension, e.g. "#define KERNEL_WIDTH xxx"
- * @note STRIDE_X/STRIDE_Y must be passed for stride info, e.g. "#define STRIDE_X xxx"
- * @note CONVOLVED_WIDTH/CONVOLVED_HEIGHT must be passed for convolved dimension, e.g. "#define CONVOLVED_WIDTH xxx"
- * @note SRC_WIDTH/SRC_HEIGHT must be passed for input dimension, e.g. "#define SRC_WIDTH xxx"
- * @note DILATION_X/DILATION_Y must be passed for dilation sizes, e.g. "#define DILATION_X xxx"
- * @note In case biases will be added to the convolution, "#define HAS_BIAS" has to be passed so that the final matrix is appended with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes).
- * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes).
- */
-
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- ImageAttributes dst_attrs;
- uint src_stride_w;
- uint dst_stride_w;
-};
-
-#ifdef DATA_TYPE_FP32
-
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, restrict);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- int xc = int(gl_GlobalInvocationID.x); // x coordinate in the convolved tensor
- int yc = int(gl_GlobalInvocationID.y); // y coordinate in the convolved tensor
- int ch = int(gl_GlobalInvocationID.z) % KERNEL_DEPTH; // input feature map
- int batch = int(gl_GlobalInvocationID.z) / KERNEL_DEPTH; // the batch
-
-    // Calculate input indices
- int xi = xc * STRIDE_X - PAD_LEFT;
- int yi = yc * STRIDE_Y - PAD_TOP;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, (ch * int(src_attrs.stride_z)) + (batch * int(src_stride_w)));
-
-    // Calculate output indices
- int xo = ch * KERNEL_WIDTH * KERNEL_HEIGHT;
- int yo = xc + yc * CONVOLVED_WIDTH; // Index of the convolution
- // sizeof is not available in GLES, so we'll use stride_x
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, (yo * int(dst_attrs.stride_y)) + (batch * int(dst_stride_w)) + xo * int(dst_attrs.stride_x));
-
- uint src_pos = 0u;
-
- // Linearize convolution elements
- for(int y = yi, y_e = yi + KERNEL_HEIGHT * DILATION_Y; y < y_e; y += DILATION_Y)
- {
- for(int x = xi, x_e = xi + KERNEL_WIDTH * DILATION_X; x < x_e; x += DILATION_X, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, int(dst_attrs.stride_x)))
- {
-#if PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0
- src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * int(src_attrs.stride_x) + y * int(src_attrs.stride_y));
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, src_pos));
-#else /* PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0 */
- if(x < 0 || x >= SRC_WIDTH || y < 0 || y >= SRC_HEIGHT)
- {
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, 0.0f);
- }
- else
- {
- src_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * int(src_attrs.stride_x) + y * int(src_attrs.stride_y));
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, src_pos));
- }
-#endif /* PAD_LEFT == 0 && PAD_TOP == 0 && PAD_RIGHT == 0 && PAD_BOTTOM == 0 */
- }
- }
-
-#ifdef HAS_BIAS
- if(ch == (KERNEL_DEPTH - 1))
- {
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, 1.0f);
- }
-#endif /* HAS_BIAS */
-}
-
-#elif defined(DATA_TYPE_FP16)
-
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-
-#ifdef KERNEL_1x1
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- uint xc = gl_GlobalInvocationID.x;
- uint yc = gl_GlobalInvocationID.y;
- uint zc = gl_GlobalInvocationID.z;
- uint ch = zc % uint(KERNEL_DEPTH); // input feature map
- uint batch = zc / uint(KERNEL_DEPTH); // the batch
-
-    // Calculate input indices
- uint xi = xc;
- uint yi = yc;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, batch * src_stride_w + ch * src_attrs.step_z);
-
-    // Calculate output indices
- uint dst_element_count = dst_attrs.step_x / dst_attrs.stride_x;
- uint xo = ch * dst_element_count;
- uint yo = xc + yc * uint(CONVOLVED_WIDTH);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, batch * dst_stride_w + yo * dst_attrs.stride_y + xo);
-
- bool x_start_even = ((xc % 2u) == 0u);
- bool z_depth_even = ((uint(KERNEL_DEPTH) % 2u) == 0u);
- uint input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, xi * src_attrs.stride_x + yi * src_attrs.stride_y);
- uint tmp_left = 0u;
- uint tmp_right = 0u;
-
- if(ch % 2u != 0u)
- {
- return;
- }
-
- if(z_depth_even || (!z_depth_even && (int(ch) < (KERNEL_DEPTH - 1))))
- {
- tmp_left = LOAD(src_ptr, input_pos);
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, xi * src_attrs.stride_x + yi * src_attrs.stride_y + src_attrs.stride_z);
- tmp_right = LOAD(src_ptr, input_pos);
- if(x_start_even)
- {
- tmp_right = (tmp_left & 0xffffu) + (tmp_right << 16u);
- }
- else
- {
- tmp_right = (tmp_left >> 16u) + (tmp_right & 0xffff0000u);
- }
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x);
-
-#ifdef HAS_BIAS
- if(ch == (uint(KERNEL_DEPTH) - 2u))
- {
- mediump vec2 bias_vec = vec2(1.f, 0.f);
- uint bias_u = packHalf2x16(bias_vec);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, bias_u);
- }
-#endif /* HAS_BIAS */
- }
- else
- {
- tmp_left = LOAD(src_ptr, input_pos);
- if(x_start_even)
- {
- tmp_right = (tmp_left & 0xffffu);
- }
- else
- {
- tmp_right = (tmp_left >> 16u);
- }
-
-#ifdef HAS_BIAS
- mediump vec2 bias_vec = vec2(0.f, 1.f);
- uint bias_u = packHalf2x16(bias_vec);
- tmp_right += (bias_u & 0xffff0000u);
-#endif /* HAS_BIAS */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- }
-}
-
-#else /* KERNEL_1x1 */
-
-void main(void)
-{
- uint xc = gl_GlobalInvocationID.x;
- uint yc = gl_GlobalInvocationID.y;
- uint zc = gl_GlobalInvocationID.z;
- uint ch = zc % uint(KERNEL_DEPTH); // input feature map
- uint batch = zc / uint(KERNEL_DEPTH); // the batch
-
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- Tensor3DIterator src_iter_b = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
-    // Calculate input indices
- uint src_element_count = src_attrs.step_x / src_attrs.stride_x;
- uint xi = (xc * uint(STRIDE_X)) / src_element_count;
- uint yi = yc * uint(STRIDE_Y);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, batch * src_stride_w + ch * src_attrs.stride_z);
-
-    // Calculate output indices
- uint dst_element_count = dst_attrs.step_x / dst_attrs.stride_x;
- uint xo = (ch * uint(KERNEL_WIDTH) * uint(KERNEL_HEIGHT)) * dst_element_count;
- uint yo = xc + yc * uint(CONVOLVED_WIDTH);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, batch * dst_stride_w + yo * dst_attrs.stride_y + xo);
-
- bool x_start_even = ((xc * uint(STRIDE_X)) % 2u == 0u);
- bool z_start_even = ((ch % 2u) == 0u);
- uint input_pos = 0u;
- uint tmp = 0u;
- uint tmp_left = 0u;
- uint tmp_right = 0u;
-
- // Linearize convolution elements
- for(uint y = yi, y_e = yi + uint(KERNEL_HEIGHT); y < y_e; ++y)
- {
- uint xstart = 0u;
- uint xend = 0u;
-
- // even col, even row
- if(x_start_even)
- {
- if(((y - yi + ch) % 2u) == 0u)
- {
- for(uint x = xi, x_e = xi + (uint(KERNEL_WIDTH) / 2u); x < x_e; ++x, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x))
- {
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.step_x + y * src_attrs.stride_y);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, input_pos));
- }
- }
- else
- {
- // 1st pair
- if(!z_start_even && (y == yi))
- {
- // cross 2d feature map
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter_b, (xi + (uint(KERNEL_WIDTH) / 2u)) * src_attrs.step_x + (yi + uint(KERNEL_HEIGHT) - 1u) * src_attrs.stride_y + batch * src_stride_w +
- (ch - 1u) * src_attrs.stride_z);
- }
- else
- {
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter,
- (xi + (uint(KERNEL_WIDTH) / 2u)) * src_attrs.step_x + (y - 1u) * src_attrs.stride_y);
- }
- tmp_right = LOAD(src_ptr, input_pos);
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, xi * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_left = LOAD(src_ptr, input_pos);
- tmp_right = (tmp_right & 0xffffu) + (tmp_left << 16u);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x);
-
- // remaining
- for(uint x = xi + 1u, x_e = xi + (uint(KERNEL_WIDTH) / 2u) + 1u; x < x_e; ++x, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x))
- {
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (x - 1u) * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_left = LOAD(src_ptr, input_pos);
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_right = LOAD(src_ptr, input_pos);
- tmp_right = (tmp_left >> 16u) + (tmp_right << 16u);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- }
- }
- }
- else
- {
- if((((y - yi) % 2u) == 0u && !z_start_even) || (((y - yi) % 2u) != 0u && z_start_even))
- {
- // 1st pair
- if(y == yi)
- {
- // cross 2d feature map
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter_b, (xi + (uint(KERNEL_WIDTH) / 2u)) * src_attrs.step_x + (yi + uint(KERNEL_HEIGHT) - 1u) * src_attrs.stride_y + batch * src_stride_w +
- (ch - 1u) * src_attrs.stride_z);
- }
- else
- {
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter,
- (xi + (uint(KERNEL_WIDTH) / 2u)) * src_attrs.step_x + (y - 1u) * src_attrs.stride_y);
- }
-
- tmp_right = LOAD(src_ptr, input_pos);
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, xi * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_left = LOAD(src_ptr, input_pos);
- tmp_right = (tmp_right >> 16u) + (tmp_left & 0xffff0000u);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x);
-
- // remaining
- for(uint x = xi + 1u, x_e = xi + (uint(KERNEL_WIDTH) / 2u) + 1u; x < x_e; ++x, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x))
- {
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.step_x + y * src_attrs.stride_y);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD(src_ptr, input_pos));
- }
- }
- else if((((y - yi) % 2u) == 0u && z_start_even) || (((y - yi) % 2u) != 0u && !z_start_even))
- {
- // 1st pair
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, xi * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_right = LOAD(src_ptr, input_pos);
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (xi + 1u) * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_left = LOAD(src_ptr, input_pos);
- tmp_right = (tmp_right >> 16u) + (tmp_left << 16u);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x);
-
- // remaining
- for(uint x = xi + 1u, x_e = xi + (uint(KERNEL_WIDTH) / 2u); x < x_e; ++x, TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.step_x))
- {
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_right = LOAD(src_ptr, input_pos);
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (x + 1u) * src_attrs.step_x + y * src_attrs.stride_y);
- tmp_left = LOAD(src_ptr, input_pos);
- tmp_right = (tmp_right >> 16u) + (tmp_left << 16u);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp_right);
- }
- }
- }
- }
-
- // NOTE: must handle last element manually instead of in loops
- // to avoid write conflict across 2d boundary
- if(ch == uint(KERNEL_DEPTH) - 1u)
- {
- uint x = xi + (uint(KERNEL_WIDTH) / 2u);
- uint y = yi + uint(KERNEL_HEIGHT) - 1u;
- input_pos = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, x * src_attrs.step_x + y * src_attrs.stride_y);
- tmp = LOAD(src_ptr, input_pos);
- if(!x_start_even)
- {
- tmp = (tmp >> 16u) + (tmp << 16u);
- }
-
-#ifdef HAS_BIAS
- mediump vec2 bias_vec = vec2(1.f, 1.f);
- uint bias_u = packHalf2x16(bias_vec);
- if(z_start_even)
- {
- tmp = (tmp & 0xffffu) + (bias_u & 0xffff0000u);
- }
- else
- {
- tmp = (bias_u & 0xffffu);
- }
-#endif /* HAS_BIAS */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, tmp);
- }
-}
-
-#endif /* KERNEL_1x1 */
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
-#endif /* IM2COL_GENERIC */
-
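
The IM2COL_GENERIC indexing above (stride, dilation, zero padding, one output-matrix row per convolution position) reduces to the following scalar C reference for the FP32 layout; the names and the tightly packed layout are illustrative:

#include <stddef.h>

/* For output position (xc, yc) and input channel ch, copy one
 * kernel_w x kernel_h window of the ch-th input plane (src) into the
 * GEMM matrix (dst): row = yc * conv_w + xc, column base = ch * Kw * Kh.
 * Out-of-bounds taps read as zero; HAS_BIAS would append one extra
 * column of ones per row, omitted here. */
static void im2col_ref(const float *src, float *dst,
                       int src_w, int src_h,
                       int kernel_w, int kernel_h, int kernel_depth,
                       int stride_x, int stride_y,
                       int pad_left, int pad_top,
                       int dilation_x, int dilation_y,
                       int conv_w, int xc, int yc, int ch)
{
    int xi = xc * stride_x - pad_left;
    int yi = yc * stride_y - pad_top;

    size_t row_len = (size_t)kernel_depth * (size_t)(kernel_w * kernel_h);
    float *out = dst + (size_t)(yc * conv_w + xc) * row_len
                 + (size_t)(ch * kernel_w * kernel_h);

    for (int ky = 0; ky < kernel_h; ++ky)
    {
        for (int kx = 0; kx < kernel_w; ++kx)
        {
            int x = xi + kx * dilation_x;
            int y = yi + ky * dilation_y;
            int inside = (x >= 0 && x < src_w && y >= 0 && y < src_h);
            *out++ = inside ? src[(size_t)y * (size_t)src_w + (size_t)x] : 0.0f;
        }
    }
}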
-#ifdef IM2COL_REDUCED
-
-/** This kernel reshapes the tensor's lowest three dimensions to a single row for the GEMM operation
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_FP16"
- * @note In case biases will be added at a later stage, "#define HAS_BIAS" has to be passed so that the final matrix is appended with 1 in each row.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] width The width of the input tensor
- * @param[in] height The height of the input tensor
- */
-
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- VectorAttributes dst_attrs;
- uint width;
- uint height;
-};
-
-#ifdef DATA_TYPE_FP32
-
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, restrict);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- VectorIterator dst_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
- uvec3 size = uvec3(gl_WorkGroupSize.xyz);
- uint image_size = width * height;
- uint tmp_out_offset = VECTOR_OFFSET(dst_iter, pos.x + pos.y * width + pos.z * image_size);
-
- STORE(dst_ptr, tmp_out_offset, LOAD_CURRENT_ITEM(src_ptr, src_iter));
-
-#ifdef HAS_BIAS
- // If it is the last thread in the 3 dimensional workgroup
- if(pos.x == (size.x - 1) && pos.y == (size.y - 1) && pos.z == (size.z - 1))
- {
- tmp_out_offset += (dst_attrs.stride_x >> uint(2));
- STORE(dst_ptr, tmp_out_offset, 1.f);
- }
-#endif // HAS_BIAS
-}
-
-#elif defined(DATA_TYPE_FP16)
-
-#if defined(IM2COL_REDUCED_8X)
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, restrict);
-#elif defined(IM2COL_REDUCED_4X) /* IM2COL_REDUCED_8X */
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, restrict);
-#else /* IM2COL_REDUCED_8X */
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, restrict);
-#endif /* IM2COL_REDUCED_8X */
-
-#if defined(IM2COL_REDUCED_GENERIC)
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator src_nostep_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- VectorIterator dst_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
- uvec3 size = uvec3(gl_WorkGroupSize.xyz);
- uint image_size = width * height;
- uint element_count = src_attrs.step_x / src_attrs.stride_x;
- uint tmp_out_offset = VECTOR_OFFSET(dst_iter, pos.x * element_count + pos.y * width + pos.z * image_size);
- uint width_fp16 = (width + uint(1)) >> uint(1);
- uint tmp;
-
- // odd width
- if(width % uint(2) != uint(0))
- {
- // even row
- if((pos.y + pos.z * height) % uint(2) == uint(0))
- {
- // skip last element of each line to avoid write conflict except for last line
- if((pos.x < (width / element_count)) || ((pos.y == gl_NumWorkGroups.y - 1u) && (pos.z == gl_NumWorkGroups.z - 1u)))
- {
- tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE(dst_ptr, tmp_out_offset, tmp);
- }
- }
- else
- {
- // special op
- uint tmp_left = uint(0);
- uint tmp_right = uint(0);
- tmp_right = LOAD_CURRENT_ITEM(src_ptr, src_iter); //right half
- if(pos.x == uint(0))
- {
- tmp_left = LOAD(src_ptr, TENSOR3D_OFFSET(src_nostep_iter, int(width), int(pos.y) - 1, int(pos.z))); //left half
- tmp_right = (tmp_left & uint(0xffff)) + (tmp_right << uint(16));
- }
- else
- {
- tmp_left = LOAD(src_ptr, TENSOR3D_OFFSET(src_nostep_iter, (int(pos.x) - 1) * int(element_count), int(pos.y), int(pos.z)));
- tmp_right = ((tmp_left >> uint(16)) + (tmp_right << uint(16)));
- }
- STORE(dst_ptr, tmp_out_offset, tmp_right);
- }
- }
- else
- {
- tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE(dst_ptr, tmp_out_offset, tmp);
- }
-
-#ifdef HAS_BIAS
- // If it is the last thread in the 3 dimensional workgroup
- if(pos.x == (size.x - 1u) && pos.y == (size.y - 1u) && pos.z == (size.z - 1u))
- {
- tmp_out_offset += (dst_attrs.stride_x >> dst_shift);
-
- // FIXME: need odd/even detection for tmp_out_offset?
- mediump vec2 bias_vec = vec2(1.0f, 1.0f);
- STORE_PACK2_HALF(dst_ptr, tmp_out_offset, bias_vec);
- }
-#endif // HAS_BIAS
-}
-
-#else /* IM2COL_REDUCED_GENERIC */
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- VectorIterator dst_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
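-
- // Flatten (x, y, z) into the 1-D im2col output offset; x is scaled by the
- // number of packed fp16 values handled per invocation (8, 4 or 2 below).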
-#if defined(IM2COL_REDUCED_8X)
- uint tmp_out_offset = VECTOR_OFFSET(dst_iter, pos.x * uint(8) + pos.y * width + pos.z * uint(IMAGE_SIZE));
- uvec4 tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE(dst_ptr, tmp_out_offset, tmp);
-#elif defined(IM2COL_REDUCED_4X) /* IM2COL_REDUCED_8X */
- uint tmp_out_offset = VECTOR_OFFSET(dst_iter, pos.x * uint(4) + pos.y * width + pos.z * uint(IMAGE_SIZE));
- uvec2 tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE(dst_ptr, tmp_out_offset, tmp);
-#else /* IM2COL_REDUCED_8X */
- uint tmp_out_offset = VECTOR_OFFSET(dst_iter, pos.x * uint(2) + pos.y * width + pos.z * uint(IMAGE_SIZE));
- uint tmp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- STORE(dst_ptr, tmp_out_offset, tmp);
-#endif /* IM2COL_REDUCED_8X */
-}
-
-#endif /* IM2COL_REDUCED_GENERIC */
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
-#endif /* IM2COL_REDUCED */
-
-#ifdef COL2IM
-#ifdef WIDTH_OUTPUT
-
-/** This kernel performs a reshaping of the output of the convolution layer.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] dst_depth The length of the destination tensor in Z dimension
- * @param[in] dst_strideZ The actual stride of the destination tensor in Z dimension
- */
-
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- uint dst_depth;
- uint dst_strideZ;
-};
-
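-/* Index-mapping sketch (illustrative, matching the FP32 path below): the
- * invocation at (x, y, z) copies the GEMM output element found at byte offset
- *
- *   x * src_step_y + y * WIDTH_OUTPUT * src_step_y
- *     + (z % dst_depth) * src_stride_x + (z / dst_depth) * dst_strideZ
- *
- * into its own (x, y, z) location of the reshaped destination tensor.
- */
-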
-#ifdef DATA_TYPE_FP32
-
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, restrict);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, pos.x * src_attrs.step_y + pos.y * uint(WIDTH_OUTPUT) * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * dst_strideZ);
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD_CURRENT_ITEM(src_ptr, src_iter));
-}
-
-#elif defined(DATA_TYPE_FP16)
-
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, restrict);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- uvec3 pos = uvec3(gl_GlobalInvocationID.xyz);
-
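-    // Two fp16 channels share one packed uint along X: each invocation gathers
-    // the matching half (.x for an even channel index, .y for an odd one) from
-    // two consecutive GEMM rows and stores them as a single packed pair.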
- if((pos.z % dst_depth) % 2u == 0u)
- {
- uint common_offset_in_bytes = pos.x * src_attrs.step_y * 2u + pos.y * uint(WIDTH_OUTPUT) * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * dst_strideZ;
- uint tmp1_in_offset = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, common_offset_in_bytes);
- uint tmp2_in_offset = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, common_offset_in_bytes + src_attrs.step_y);
- vec2 tmp1 = LOAD_UNPACK2_HALF(src_ptr, tmp1_in_offset);
- vec2 tmp2 = LOAD_UNPACK2_HALF(src_ptr, tmp2_in_offset);
- vec2 result = vec2(tmp1.x, tmp2.x);
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
- }
- else
- {
- uint common_offset_in_bytes = pos.x * src_attrs.step_y * 2u + pos.y * uint(WIDTH_OUTPUT) * src_attrs.step_y + (pos.z % dst_depth) * src_attrs.stride_x + (pos.z / dst_depth) * dst_strideZ - 2u;
- uint tmp1_in_offset = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, common_offset_in_bytes);
- uint tmp2_in_offset = TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, common_offset_in_bytes + src_attrs.step_y);
- vec2 tmp1 = LOAD_UNPACK2_HALF(src_ptr, tmp1_in_offset);
- vec2 tmp2 = LOAD_UNPACK2_HALF(src_ptr, tmp2_in_offset);
- vec2 result = vec2(tmp1.y, tmp2.y);
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
- }
-}
-
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
-#endif /* WIDTH_OUTPUT */
-#endif /* COL2IM */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs b/src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs
deleted file mode 100644
index 3e7e1fd351..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/depthwise_convolution3x3.cs
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-/** This kernel performs a depthwise convolution.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note This kernel has multiple optimized depthwise convolution options for FP16.
- *       The depthwise convolution option must be passed at compile time using "#define PROCESS_nX_nY_nZ" e.g. "#define PROCESS_4X_1Y_1Z"
- * @note The convolution stride x must be passed at compile time using "#define STRIDE_X n" e.g. "#define STRIDE_X 1"
- * @note If biases are added to the convolution, "#define BIAS" has to be passed at compile time.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_attrs The attributes of the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- Tensor3DAttributes weights_attrs;
-#ifdef BIAS
- VectorAttributes biases_attrs;
-#endif /* BIAS */
-};
-
-#if defined(DATA_TYPE_FP16)
-#if defined(PROCESS_4X_3Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uvec2, weights_ptr, weights_shift, 3, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride1(offset)
-
-vec4 convolve1x3(vec4 s[3], vec4 w)
-{
- vec4 r;
-
- r = s[0] * w[0] + s[1] * w[1] + s[2] * w[2];
-
- return r;
-}
-
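-/* Builds the three shifted 4-wide windows needed by the 1x3 filter from eight
- * consecutive half values h0..h7:
- *   r[0] = (h0, h1, h2, h3), r[1] = (h1, h2, h3, h4), r[2] = (h2, h3, h4, h5)
- */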
-vec4[3] load_unpack_swizzle_stride1(uint offset)
-{
- vec4 s[2];
- s = VLOAD2_UNPACK8_HALF(src_ptr, offset);
-
- vec4 r[3];
- r[0] = s[0];
- r[1] = vec4(s[0].yzw, s[1].x);
- r[2] = vec4(s[0].zw, s[1].xy);
-
- return r;
-}
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[3];
- for(int i = 0; i < 3; i++)
- {
- pixels[i] = vec4(0);
- }
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_attrs.stride_z);
-
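-    // With DEPTH_MULTIPLIER output planes per input plane, output plane z_index
-    // reads input plane z_index / DEPTH_MULTIPLIER: rewind the source iterator
-    // from plane z_index back to that plane.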
- src_iter.current_offset_in_bytes -= int((z_index - z_index / uint(DEPTH_MULTIPLIER)) * src_attrs.step_z);
-
- vec4 w[3];
- w[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
- w[1] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- w[2] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
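-    // Five consecutive input rows yield this tile's three output rows: input
-    // row n contributes to pixels[n - 2] .. pixels[n] (where they exist), using
-    // filter rows w[2] .. w[0] respectively.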
- vec4 s[3];
- vec4 r;
- // first line
- s = LOAD_UNPACK_SWIZZLE(CURRENT_ITEM_OFFSET(src_iter));
-
- r = convolve1x3(s, w[0]);
- pixels[0] += r;
-
- // second line
- s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 1, 0));
-
- r = convolve1x3(s, w[1]);
- pixels[0] += r;
- r = convolve1x3(s, w[0]);
- pixels[1] += r;
-
- // third line
- s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 2, 0));
-
- r = convolve1x3(s, w[2]);
- pixels[0] += r;
- r = convolve1x3(s, w[1]);
- pixels[1] += r;
- r = convolve1x3(s, w[0]);
- pixels[2] += r;
-
- // fourth line
- s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 3, 0));
-
- r = convolve1x3(s, w[2]);
- pixels[1] += r;
- r = convolve1x3(s, w[1]);
- pixels[2] += r;
-
- // fifth line
- s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 4, 0));
-
- r = convolve1x3(s, w[2]);
- pixels[2] += r;
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
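-    // Biases are packed as fp16 pairs: the load above returns the pair holding
-    // bias[z_index], so take .x for an even index and .y for an odd one.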
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- for(int i = 0; i < 3; i++)
- {
- pixels[i] += vec4(b);
- }
-#endif /* BIAS */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
-}
-#elif defined(PROCESS_4X_1Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uvec2, weights_ptr, weights_shift, 3, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 3
-#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride3(offset)
-#elif STRIDE_X == 2
-#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride2(offset)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define LOAD_UNPACK_SWIZZLE(offset) load_unpack_swizzle_stride1(offset)
-#else /* STRIDE_X not equals 1, 2 or 3 */
-#error STRIDE_X larger than 3 is not supported
-#endif /* STRIDE_X == 3 */
-
-vec4 convolve1x3(vec4 s[3], vec4 w)
-{
- vec4 r;
-
- r = s[0] * w[0] + s[1] * w[1] + s[2] * w[2];
-
- return r;
-}
-
-vec4[3] load_unpack_swizzle_stride1(uint offset)
-{
- vec4 s[2];
- s = VLOAD2_UNPACK8_HALF(src_ptr, offset);
-
- vec4 r[3];
- r[0] = s[0];
- r[1] = vec4(s[0].yzw, s[1].x);
- r[2] = vec4(s[0].zw, s[1].xy);
-
- return r;
-}
-
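-/* Stride-2 windows: only every second input contributes to consecutive
- * outputs. From twelve consecutive half values h0..h11:
- *   r[0] = (h0, h2, h4, h6), r[1] = (h1, h3, h5, h7), r[2] = (h2, h4, h6, h8)
- */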
-vec4[3] load_unpack_swizzle_stride2(uint offset)
-{
- vec4 s[3];
- s[0] = LOAD_UNPACK4_HALF(src_ptr, offset);
- s[1] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(1));
- s[2] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(2));
-
- vec4 r[3];
- r[0] = vec4(s[0].xz, s[1].xz);
- r[1] = vec4(s[0].yw, s[1].yw);
- r[2] = vec4(s[0].z, s[1].xz, s[2].x);
-
- return r;
-}
-
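-/* Stride-3 windows: every third input contributes. From h0..h11:
- *   r[0] = (h0, h3, h6, h9), r[1] = (h1, h4, h7, h10), r[2] = (h2, h5, h8, h11)
- */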
-vec4[3] load_unpack_swizzle_stride3(uint offset)
-{
- vec4 s[3];
- s[0] = LOAD_UNPACK4_HALF(src_ptr, offset);
- s[1] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(1));
- s[2] = LOAD_UNPACK4_HALF(src_ptr, offset + uint(2));
-
- vec4 r[3];
- r[0] = vec4(s[0].xw, s[1].z, s[2].y);
- r[1] = vec4(s[0].y, s[1].xw, s[2].z);
- r[2] = vec4(s[0].z, s[1].y, s[2].xw);
-
- return r;
-}
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_attrs.stride_z);
-
- src_iter.current_offset_in_bytes -= int((z_index - z_index / uint(DEPTH_MULTIPLIER)) * src_attrs.step_z);
-
- vec4 w[3];
- w[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
- w[1] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- w[2] = LOAD_UNPACK4_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec4 s[3];
- vec4 r;
- // first line
- s = LOAD_UNPACK_SWIZZLE(CURRENT_ITEM_OFFSET(src_iter));
-
- r = convolve1x3(s, w[0]);
- pixels += r;
-
- // second line
- s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 1, 0));
-
- r = convolve1x3(s, w[1]);
- pixels += r;
-
- // third line
- s = LOAD_UNPACK_SWIZZLE(TENSOR3D_OFFSET(src_iter, 0, 2, 0));
-
- r = convolve1x3(s, w[2]);
- pixels += r;
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels += vec4(b);
-#endif /* BIAS */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
-}
-#endif /* PROCESS_4X_3Y_1Z */
-#endif /* DATA_TYPE_FP16 */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs
deleted file mode 100644
index c455489468..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution1x1.cs
+++ /dev/null
@@ -1,1057 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#ifdef FUSED_ACTIVATION
-#include "activation_layer_helpers_cs.h"
-#endif /* FUSED_ACTIVATION */
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note This kernel has multiple optimized direct convolution options for FP16.
- * The direct convolution option must be passed at compile time using "#define PROCESS_nX_nY_nZ" e.g. "#define PROCESS_8X_1Y_1Z"
- * @note The convolution stride x must be passed at compile time using "#define STRIDE_X n" e.g. "#define STRIDE_X 1"
- * @note If biases are added to the convolution, "#define BIAS" has to be passed at compile time.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_attrs The attributes of the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- * @param[in] weights_depth The third dimension of the weights tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- Tensor3DAttributes weights_attrs;
-#ifdef BIAS
- VectorAttributes biases_attrs;
-#endif /* BIAS */
- uint weights_stride_w;
- uint weights_depth;
-};
-
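-/* Illustrative sketch: a 1x1 convolution reduces to a per-output-channel dot
- * product over the input channels,
- *
- *   dst(x, y, z) = sum_{d < weights_depth} src(x * stride_x, y * stride_y, d) * w(z, d) + bias(z)
- *
- * which is the accumulation loop implemented by every variant below.
- */
-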
-#if defined(DATA_TYPE_FP32)
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, float, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- float pixels = 0.f;
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- float temp;
- float temp_weight;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- temp = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- temp_weight = LOAD_CURRENT_ITEM(weights_ptr, weights_iter);
- pixels += temp * temp_weight;
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- pixels += LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels = ACT_OP(pixels);
-#endif /* FUSED_ACTIVATION */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
-}
-
-#elif defined(DATA_TYPE_FP16)
-#if defined(PROCESS_4X_1Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w) convolve_stride2(s, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w) convolve_stride1(s, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4 convolve_stride1(ImageIterator src_iter, float w)
-{
- vec4 s;
- s = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
-
- s *= w;
-
- return s;
-}
-
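-/* Stride 2: gather the even-indexed pixels from two adjacent 4-wide loads,
- * i.e. input columns 0, 2, 4 and 6 produce the four outputs. */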
-vec4 convolve_stride2(ImageIterator src_iter, float w)
-{
- vec4 s[2];
- vec4 r;
-
- s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, 0));
- r = vec4(s[0].xz, s[1].xz);
-
- r *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
-#ifdef WEIGHTS_OPTIMIZATION
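-    // Unroll the channel loop by two so both fp16 halves of each packed uint
-    // weight load are consumed before advancing the weight pointer.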
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1 = CONVOLVE(src_iter, w1);
- pixels += r1;
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r2 = CONVOLVE(src_iter, w2);
- pixels += r2;
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r = CONVOLVE(src_iter, w);
- pixels += r;
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels = ACT_OP(pixels);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
-}
-#elif defined(PROCESS_4X_2Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w) convolve_stride2(s, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w) convolve_stride1(s, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4[2] convolve_stride1(ImageIterator src_iter, float w)
-{
- vec4 s[2];
- s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, int(STRIDE_Y)));
-
- s[0] *= w;
- s[1] *= w;
-
- return s;
-}
-
-vec4[2] convolve_stride2(ImageIterator src_iter, float w)
-{
- vec4 s1[2];
- vec4 s2[2];
- vec4 r[2];
-
- s1[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s1[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, 0));
- r[0] = vec4(s1[0].xz, s1[1].xz);
-
- s2[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, int(STRIDE_Y)));
- s2[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, int(STRIDE_Y)));
- r[1] = vec4(s2[0].xz, s2[1].xz);
-
- r[0] *= w;
- r[1] *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[2];
- pixels[0] = vec4(0.f);
- pixels[1] = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
-#ifdef WEIGHTS_OPTIMIZATION
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1[2] = CONVOLVE(src_iter, w1);
- pixels[0] += r1[0];
- pixels[1] += r1[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r2[2] = CONVOLVE(src_iter, w2);
- pixels[0] += r2[0];
- pixels[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r[2] = CONVOLVE(src_iter, w);
- pixels[0] += r[0];
- pixels[1] += r[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels[0] += b;
- pixels[1] += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
-}
-#elif defined(PROCESS_4X_3Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w) convolve_stride2(s, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w) convolve_stride1(s, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4[3] convolve_stride1(ImageIterator src_iter, float w)
-{
- vec4 s[3];
- s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, int(STRIDE_Y)));
- s[2] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, (2 * int(STRIDE_Y))));
-
- s[0] *= w;
- s[1] *= w;
- s[2] *= w;
-
- return s;
-}
-
-vec4[3] convolve_stride2(ImageIterator src_iter, float w)
-{
- vec4 s1[2];
- vec4 s2[2];
- vec4 s3[2];
- vec4 r[3];
-
- s1[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s1[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, 0));
- r[0] = vec4(s1[0].xz, s1[1].xz);
-
- s2[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, int(STRIDE_Y)));
- s2[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, int(STRIDE_Y)));
- r[1] = vec4(s2[0].xz, s2[1].xz);
-
- s3[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, (2 * int(STRIDE_Y))));
- s3[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, (2 * int(STRIDE_Y))));
- r[2] = vec4(s3[0].xz, s3[1].xz);
-
- r[0] *= w;
- r[1] *= w;
- r[2] *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[3];
- pixels[0] = vec4(0.f);
- pixels[1] = vec4(0.f);
- pixels[2] = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
-#ifdef WEIGHTS_OPTIMIZATION
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1[3] = CONVOLVE(src_iter, w1);
- pixels[0] += r1[0];
- pixels[1] += r1[1];
- pixels[2] += r1[2];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r2[3] = CONVOLVE(src_iter, w2);
- pixels[0] += r2[0];
- pixels[1] += r2[1];
- pixels[2] += r2[2];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r[3] = CONVOLVE(src_iter, w);
- pixels[0] += r[0];
- pixels[1] += r[1];
- pixels[2] += r[2];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels[0] += b;
- pixels[1] += b;
- pixels[2] += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels[2] = ACT_OP(pixels[2]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
-}
-#elif defined(PROCESS_4X_4Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w, x1, y1) convolve_stride2(s, w, x1, y1)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w, x1, y1) convolve_stride1(s, w, x1, y1)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4[2] convolve_stride1(ImageIterator src_iter, float w, int x1, int y1)
-{
- vec4 s[2];
- s[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, x1, y1));
- s[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, x1, (y1 + int(STRIDE_Y))));
-
- s[0] *= w;
- s[1] *= w;
-
- return s;
-}
-
-vec4[2] convolve_stride2(ImageIterator src_iter, float w, int x1, int y1)
-{
- vec4 s1[2];
- vec4 s2[2];
- vec4 r[2];
-
- s1[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, x1, y1));
- s1[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, (4 + x1), y1));
- r[0] = vec4(s1[0].xz, s1[1].xz);
-
- s2[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, x1, (y1 + int(STRIDE_Y))));
- s2[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, (4 + x1), (y1 + int(STRIDE_Y))));
- r[1] = vec4(s2[0].xz, s2[1].xz);
-
- r[0] *= w;
- r[1] *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[2];
- vec4 pixels1[2];
- pixels[0] = vec4(0.f);
- pixels[1] = vec4(0.f);
- pixels1[0] = vec4(0.f);
- pixels1[1] = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
-#ifdef WEIGHTS_OPTIMIZATION
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1[2] = CONVOLVE(src_iter, w1, 0, 0);
- vec4 r2[2] = CONVOLVE(src_iter, w1, 0, (2 * int(STRIDE_Y)));
- pixels[0] += r1[0];
- pixels[1] += r1[1];
- pixels1[0] += r2[0];
- pixels1[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r3[2] = CONVOLVE(src_iter, w2, 0, 0);
- vec4 r4[2] = CONVOLVE(src_iter, w2, 0, (2 * int(STRIDE_Y)));
- pixels[0] += r3[0];
- pixels[1] += r3[1];
- pixels1[0] += r4[0];
- pixels1[1] += r4[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r1[2] = CONVOLVE(src_iter, w, 0, 0);
- vec4 r2[2] = CONVOLVE(src_iter, w, 0, (2 * int(STRIDE_Y)));
- pixels[0] += r1[0];
- pixels[1] += r1[1];
- pixels1[0] += r2[0];
- pixels1[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels[0] += b;
- pixels[1] += b;
- pixels1[0] += b;
- pixels1[1] += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels1[0] = ACT_OP(pixels1[0]);
- pixels1[1] = ACT_OP(pixels1[1]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels1[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 3, 0), pixels1[1]);
-}
-#elif defined(PROCESS_4X_2Y_2Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w) convolve_stride2(s, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w) convolve_stride1(s, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4[2] convolve_stride1(ImageIterator src_iter, float w)
-{
- vec4 s[2];
- s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, int(STRIDE_Y)));
-
- s[0] *= w;
- s[1] *= w;
-
- return s;
-}
-
-vec4[2] convolve_stride2(ImageIterator src_iter, float w)
-{
- vec4 s1[2];
- vec4 s2[2];
- vec4 r[2];
-
- s1[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- s1[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, 0));
- r[0] = vec4(s1[0].xz, s1[1].xz);
-
- s2[0] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, int(STRIDE_Y)));
- s2[1] = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 4, int(STRIDE_Y)));
- r[1] = vec4(s2[0].xz, s2[1].xz);
-
- r[0] *= w;
- r[1] *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- uint z_base_index = uint(gl_GlobalInvocationID.z) << uint(1);
-
- // store the original src offset so the channel loop can be replayed for the second z plane
- int s_offset_in_bytes = src_iter.current_offset_in_bytes;
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_base_index * weights_stride_w);
-
- for(int z = 0; z < 2; ++z)
- {
- uint z_index = z_base_index + uint(z);
-
- src_iter.current_offset_in_bytes = s_offset_in_bytes;
-
- vec4 pixels[2];
- pixels[0] = vec4(0.f);
- pixels[1] = vec4(0.f);
-
-#ifdef WEIGHTS_OPTIMIZATION
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1[2] = CONVOLVE(src_iter, w1);
- pixels[0] += r1[0];
- pixels[1] += r1[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r2[2] = CONVOLVE(src_iter, w2);
- pixels[0] += r2[0];
- pixels[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r[2] = CONVOLVE(src_iter, w);
- pixels[0] += r[0];
- pixels[1] += r[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels[0] += b;
- pixels[1] += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_z);
- }
-}
-#elif defined(PROCESS_8X_1Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w) convolve_stride2(s, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w) convolve_stride1(s, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4[2] convolve_stride1(ImageIterator src_iter, float w)
-{
- vec4 s[2];
- s = LOAD_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
-
- s[0] *= w;
- s[1] *= w;
-
- return s;
-}
-
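-/* Stride 2, eight outputs: the even-indexed halves of the first eight inputs
- * (h0, h2, h4, h6) give outputs 0..3; those of the next eight give 4..7. */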
-vec4[2] convolve_stride2(ImageIterator src_iter, float w)
-{
- vec4 s1[2];
- vec4 s2[2];
- vec4 r[2];
-
- s1 = LOAD_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
- r[0] = vec4(s1[0].xz, s1[1].xz);
- s2 = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 8, 0));
- r[1] = vec4(s2[0].xz, s2[1].xz);
-
- r[0] *= w;
- r[1] *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[2];
- pixels[0] = vec4(0.f);
- pixels[1] = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
-#ifdef WEIGHTS_OPTIMIZATION
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1[2] = CONVOLVE(src_iter, w1);
- pixels[0] += r1[0];
- pixels[1] += r1[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r2[2] = CONVOLVE(src_iter, w2);
- pixels[0] += r2[0];
- pixels[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r[2] = CONVOLVE(src_iter, w);
- pixels[0] += r[0];
- pixels[1] += r[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels[0] += b;
- pixels[1] += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
-}
-#elif defined(PROCESS_8X_2Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE(s, w, x1, y1) convolve_stride2(s, w, x1, y1)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE(s, w, x1, y1) convolve_stride1(s, w, x1, y1)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4[2] convolve_stride1(ImageIterator src_iter, float w, int x1, int y1)
-{
- vec4 s[2];
- s = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, x1, y1));
-
- s[0] *= w;
- s[1] *= w;
-
- return s;
-}
-
-vec4[2] convolve_stride2(ImageIterator src_iter, float w, int x1, int y1)
-{
- vec4 s1[2];
- vec4 s2[2];
- vec4 r[2];
-
- s1 = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, x1, y1));
- r[0] = vec4(s1[0].xz, s1[1].xz);
- s2 = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, (8 + x1), y1));
- r[1] = vec4(s2[0].xz, s2[1].xz);
-
- r[0] *= w;
- r[1] *= w;
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[2];
- vec4 pixels1[2];
- pixels[0] = vec4(0.f);
- pixels[1] = vec4(0.f);
- pixels1[0] = vec4(0.f);
- pixels1[1] = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
-#ifdef WEIGHTS_OPTIMIZATION
- float w1, w2;
- int nums = (int(weights_depth)) / 2;
- for(int d = 0; d < nums; ++d)
- {
- vec2 vec2_w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter);
-
- w1 = vec2_w.x;
- vec4 r1[2] = CONVOLVE(src_iter, w1, 0, 0);
- vec4 r2[2] = CONVOLVE(src_iter, w1, 0, (int(STRIDE_Y)));
- pixels[0] += r1[0];
- pixels[1] += r1[1];
- pixels1[0] += r2[0];
- pixels1[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
-
- w2 = vec2_w.y;
- vec4 r3[2] = CONVOLVE(src_iter, w2, 0, 0);
- vec4 r4[2] = CONVOLVE(src_iter, w2, 0, (int(STRIDE_Y)));
- pixels[0] += r3[0];
- pixels[1] += r3[1];
- pixels1[0] += r4[0];
- pixels1[1] += r4[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z * uint(2));
- }
-#else /* WEIGHTS_OPTIMIZATION */
- float w;
- for(int d = 0; d < int(weights_depth); ++d)
- {
- w = LOAD_UNPACK2_CURRENT_ITEM_HALF(weights_ptr, weights_iter).x;
-
- vec4 r1[2] = CONVOLVE(src_iter, w, 0, 0);
- vec4 r2[2] = CONVOLVE(src_iter, w, 0, (int(STRIDE_Y)));
- pixels[0] += r1[0];
- pixels[1] += r1[1];
- pixels1[0] += r2[0];
- pixels1[1] += r2[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-#endif /* WEIGHTS_OPTIMIZATION */
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels[0] += b;
- pixels[1] += b;
- pixels1[0] += b;
- pixels1[1] += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels1[0] = ACT_OP(pixels1[0]);
- pixels1[1] = ACT_OP(pixels1[1]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
- STORE_PACK8_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels1);
-}
-#endif /* PROCESS_4X_1Y_1Z */
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs
deleted file mode 100644
index c9a2121a88..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution3x3.cs
+++ /dev/null
@@ -1,1155 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#ifdef FUSED_ACTIVATION
-#include "activation_layer_helpers_cs.h"
-#endif /* FUSED_ACTIVATION */
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-/** This kernel performs a direct convolution to convolve the low three dimensions.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note This kernel has multiple optimized direct convolution options for FP16.
- * The direct convolution option must be passed at compile time using "#define PROCESS_nX_nY_nZ" e.g. "#define PROCESS_8X_1Y_1Z"
- * @note The convolution stride x must be passed at compile time using "#define STRIDE_X n" e.g. "#define STRIDE_X 1"
- * This OpenGL ES shader works with stride_x = 1 and 2
- * @note If biases are added to the convolution, "#define BIAS" has to be passed at compile time.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_attrs The attributes of the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- * @param[in] weights_depth The third dimension of the weights tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- Tensor3DAttributes weights_attrs;
-#ifdef BIAS
- VectorAttributes biases_attrs;
-#endif /* BIAS */
- uint weights_stride_w;
- uint weights_depth;
-};
-
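-/* Illustrative sketch of the computation implemented by every variant below:
- *
- *   dst(x, y, z) = bias(z) + sum_{d < weights_depth} sum_{j = 0..2} sum_{i = 0..2}
- *                  src(x * stride_x + i, y * stride_y + j, d) * w(i, j, d, z)
- */
-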
-#if defined(DATA_TYPE_FP32)
-#if defined(PROCESS_1X_1Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, float, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- float pixels = 0.f;
-
- uint z_index = gl_GlobalInvocationID.z;
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- vec3 temp;
- vec3 w;
-
- temp = VLOAD3(vec3, src_ptr, IMAGE_OFFSET(src_iter, 0, 0));
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
-
- pixels += temp.x * w[0] + temp.y * w[1] + temp.z * w[2];
-
- temp = VLOAD3(vec3, src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
-
- pixels += temp.x * w[0] + temp.y * w[1] + temp.z * w[2];
-
- temp = VLOAD3(vec3, src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- pixels += temp.x * w[0] + temp.y * w[1] + temp.z * w[2];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- pixels += LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels = ACT_OP(pixels);
-#endif /* FUSED_ACTIVATION */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
-}
-
-#elif defined(PROCESS_8X_1Y_1Z)
-
-TENSOR_DECLARATION(1, srcBuffer, vec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, vec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, float, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE1x3(offset, w) convolve1x3_stride2(offset, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE1x3(offset, w) convolve1x3_stride1(offset, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
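-/* Convolves eight consecutive output columns: "middle" and "right" are the
- * input row shifted left by one and two elements, so output i is
- * in[i] * w[0] + in[i + 1] * w[1] + in[i + 2] * w[2]. */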
-vec4[2] convolve1x3_stride1(uint offset, vec3 w)
-{
- vec4 middle;
- vec4 right;
- vec4 tmp[3];
- vec4 r[2];
-
- tmp = VLOAD3(vec4[3], src_ptr, offset);
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- r[0] = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- middle = vec4(tmp[1].yzw, tmp[2].x);
- right = vec4(tmp[1].zw, tmp[2].xy);
-
- r[1] = tmp[1] * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
-
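-/* Stride-2 variant: "left", "middle" and "right" gather every second input
- * element starting at offsets 0, 1 and 2, e.g. left = (in0, in2, in4, in6). */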
-vec4[2] convolve1x3_stride2(uint offset, vec3 w)
-{
- vec4 left;
- vec4 middle;
- vec4 right;
- vec4 tmp1[3];
- vec4 tmp2[2];
- vec4 r[2];
-
- tmp1 = VLOAD3(vec4[3], src_ptr, offset);
-
- left = vec4(tmp1[0].xz, tmp1[1].xz);
- middle = vec4(tmp1[0].yw, tmp1[1].yw);
- right = vec4(tmp1[0].z, tmp1[1].xz, tmp1[2].x);
-
- r[0] = left * w[0] + middle * w[1] + right * w[2];
-
- tmp2 = VLOAD2(vec4[2], src_ptr, offset + uint(3));
-
- left = vec4(tmp1[2].xz, tmp2[0].xz);
- middle = vec4(tmp1[2].yw, tmp2[0].yw);
- right = vec4(tmp1[2].z, tmp2[0].xz, tmp2[1].x);
-
- r[1] = left * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[2];
- pixels[0] = vec4(0);
- pixels[1] = vec4(0);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- vec3 w;
- vec4 r[2];
-
- // first line
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
-
- r = CONVOLVE1x3(CURRENT_ITEM_OFFSET(src_iter), w);
- pixels[0] += r[0];
- pixels[1] += r[1];
-
- // second line
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
-
- r = CONVOLVE1x3(IMAGE_OFFSET(src_iter, 0, 1), w);
- pixels[0] += r[0];
- pixels[1] += r[1];
-
- // third line
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- r = CONVOLVE1x3(IMAGE_OFFSET(src_iter, 0, 2), w);
- pixels[0] += r[0];
- pixels[1] += r[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- float b = LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
- pixels[0] += vec4(b);
- pixels[1] += vec4(b);
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
-#endif /* FUSED_ACTIVATION */
-
- VSTORE2_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
-}
-
-#elif defined(PROCESS_4X_1Y_1Z)
-
-TENSOR_DECLARATION(1, srcBuffer, vec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, vec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, float, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE1x3(offset, w) convolve1x3_stride2(offset, w)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE1x3(offset, w) convolve1x3_stride1(offset, w)
-#else /* STRIDE_X not equals 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4 convolve1x3_stride1(uint offset, vec3 w)
-{
- vec4 tmp[2];
- vec4 middle;
- vec4 right;
-
- tmp = VLOAD2(vec4[2], src_ptr, offset);
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- tmp[1] = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- return tmp[1];
-}
-
-vec4 convolve1x3_stride2(uint offset, vec3 w)
-{
- vec4 left;
- vec4 middle;
- vec4 right;
-
- vec4 tmp[3];
-
- tmp = VLOAD3(vec4[3], src_ptr, offset);
-
- left = vec4(tmp[0].xz, tmp[1].xz);
- middle = vec4(tmp[0].yw, tmp[1].yw);
- right = vec4(tmp[0].z, tmp[1].xz, tmp[2].x);
-
- tmp[0] = left * w[0] + middle * w[1] + right * w[2];
-
- return tmp[0];
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels;
- pixels = vec4(0.f);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- vec3 w;
-
- // first line
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
- pixels += CONVOLVE1x3(CURRENT_ITEM_OFFSET(src_iter), w);
-
- // second line
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- pixels += CONVOLVE1x3(IMAGE_OFFSET(src_iter, 0, 1), w);
-
- // third line
- w = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
- pixels += CONVOLVE1x3(IMAGE_OFFSET(src_iter, 0, 2), w);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- float b = LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
- pixels += b;
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels = ACT_OP(pixels);
-#endif /* FUSED_ACTIVATION */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
-}
-
-#elif defined(PROCESS_4X_3Y_1Z)
-
-TENSOR_DECLARATION(1, srcBuffer, vec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, vec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, float, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#define CONVOLVE1x3(left, middle, right, w) convolve1x3_stride1(left, middle, right, w)
-
-vec4 convolve1x3_stride1(vec4 left, vec4 middle, vec4 right, vec3 w)
-{
- vec4 r;
-
- r = left * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
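-
-/* Row-reuse note: computing 3 output rows of a 3x3 stride-1 convolution needs
- * input rows 0..4. In the loop below, input row y feeds output row y with
- * weight row 0, output row y - 1 with weight row 1 and output row y - 2 with
- * weight row 2, so each input row is loaded exactly once per channel.
- */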
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[3];
- pixels[0] = vec4(0);
- pixels[1] = vec4(0);
- pixels[2] = vec4(0);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- vec3 w[3];
-
- w[0] = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
- w[1] = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- w[2] = VLOAD3(vec3, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec4 s[2];
- vec4 middle;
- vec4 right;
- // first line
- s = VLOAD2_CURRENT_ITEM(vec4[2], src_ptr, src_iter);
- middle = vec4(s[0].yzw, s[1].x);
- right = vec4(s[0].zw, s[1].xy);
- pixels[0] += CONVOLVE1x3(s[0], middle, right, w[0]);
-
- // second line
- s = VLOAD2(vec4[2], src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- middle = vec4(s[0].yzw, s[1].x);
- right = vec4(s[0].zw, s[1].xy);
- pixels[0] += CONVOLVE1x3(s[0], middle, right, w[1]);
- pixels[1] += CONVOLVE1x3(s[0], middle, right, w[0]);
-
- // third line
- s = VLOAD2(vec4[2], src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- middle = vec4(s[0].yzw, s[1].x);
- right = vec4(s[0].zw, s[1].xy);
- pixels[0] += CONVOLVE1x3(s[0], middle, right, w[2]);
- pixels[1] += CONVOLVE1x3(s[0], middle, right, w[1]);
- pixels[2] += CONVOLVE1x3(s[0], middle, right, w[0]);
-
- // fourth line
- s = VLOAD2(vec4[2], src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
- middle = vec4(s[0].yzw, s[1].x);
- right = vec4(s[0].zw, s[1].xy);
- pixels[1] += CONVOLVE1x3(s[0], middle, right, w[2]);
- pixels[2] += CONVOLVE1x3(s[0], middle, right, w[1]);
-
- // fifth line
- s = VLOAD2(vec4[2], src_ptr, IMAGE_OFFSET(src_iter, 0, 4));
- middle = vec4(s[0].yzw, s[1].x);
- right = vec4(s[0].zw, s[1].xy);
- pixels[2] += CONVOLVE1x3(s[0], middle, right, w[2]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- float b = LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- pixels[0] += vec4(b);
- pixels[1] += vec4(b);
- pixels[2] += vec4(b);
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels[2] = ACT_OP(pixels[2]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels[0]);
- STORE(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
-}
-
-#endif /* PROCESS_nX_nY_nZ */
-
-#elif defined(DATA_TYPE_FP16)
-
-#if defined(PROCESS_8X_3Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#define CONVOLVE1x3(s, w) convolve1x3_stride1(s, w)
-
-vec4[2] convolve1x3_stride1(vec4 tmp[3], vec3 w)
-{
- vec4 middle;
- vec4 right;
- vec4 r[2];
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- r[0] = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- middle = vec4(tmp[1].yzw, tmp[2].x);
- right = vec4(tmp[1].zw, tmp[2].xy);
-
- r[1] = tmp[1] * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
-
-vec4[3] vload2_src_unpack12_half(uint offset)
-{
- uvec4 packed_s[2];
- vec4 s[3];
-
- packed_s = VLOAD2(uvec4[2], src_ptr, offset);
-
- s[0] = vec4(unpackHalf2x16(packed_s[0].x), unpackHalf2x16(packed_s[0].y));
- s[1] = vec4(unpackHalf2x16(packed_s[0].z), unpackHalf2x16(packed_s[0].w));
- s[2] = vec4(unpackHalf2x16(packed_s[1].x), unpackHalf2x16(packed_s[1].y));
-
- return s;
-}
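-
-/* Packing note: each uint holds two packed FP16 values, so one uvec4 carries
- * 8 halves. The VLOAD2 above fetches 16 halves and the first 12 are unpacked
- * into three vec4s, enough for the 8-wide stride-1 1x3 window, which reads
- * 8 + 3 - 1 = 10 inputs.
- */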
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[3][2];
- int i, j;
- for(i = 0; i < 3; i++)
- {
- for(j = 0; j < 2; j++)
- {
- pixels[i][j] = vec4(0);
- }
- }
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- uvec2 packed_w[3];
-
- packed_w[0] = VLOAD2_CURRENT_ITEM(uvec2, weights_ptr, weights_iter);
- packed_w[1] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- packed_w[2] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec3 w[3];
- w[0] = vec3(unpackHalf2x16(packed_w[0].x), unpackHalf2x16(packed_w[0].y).x);
- w[1] = vec3(unpackHalf2x16(packed_w[1].x), unpackHalf2x16(packed_w[1].y).x);
- w[2] = vec3(unpackHalf2x16(packed_w[2].x), unpackHalf2x16(packed_w[2].y).x);
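- // Each uvec2 above carries 4 packed halves; only the first 3 are the
- // 1x3 taps of that weight row, the 4th is ignored by the unpack.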
-
- vec4 s[3];
- vec4 r[2];
-
- // first line
- s = vload2_src_unpack12_half(CURRENT_ITEM_OFFSET(src_iter));
-
- r = CONVOLVE1x3(s, w[0]);
- pixels[0][0] += r[0];
- pixels[0][1] += r[1];
-
- // second line
- s = vload2_src_unpack12_half(IMAGE_OFFSET(src_iter, 0, 1));
-
- r = CONVOLVE1x3(s, w[1]);
- pixels[0][0] += r[0];
- pixels[0][1] += r[1];
- r = CONVOLVE1x3(s, w[0]);
- pixels[1][0] += r[0];
- pixels[1][1] += r[1];
-
- // third line
- s = vload2_src_unpack12_half(IMAGE_OFFSET(src_iter, 0, 2));
-
- r = CONVOLVE1x3(s, w[2]);
- pixels[0][0] += r[0];
- pixels[0][1] += r[1];
- r = CONVOLVE1x3(s, w[1]);
- pixels[1][0] += r[0];
- pixels[1][1] += r[1];
- r = CONVOLVE1x3(s, w[0]);
- pixels[2][0] += r[0];
- pixels[2][1] += r[1];
-
- // fourth line
- s = vload2_src_unpack12_half(IMAGE_OFFSET(src_iter, 0, 3));
-
- r = CONVOLVE1x3(s, w[2]);
- pixels[1][0] += r[0];
- pixels[1][1] += r[1];
- r = CONVOLVE1x3(s, w[1]);
- pixels[2][0] += r[0];
- pixels[2][1] += r[1];
-
- // fifth line
- s = vload2_src_unpack12_half(IMAGE_OFFSET(src_iter, 0, 4));
-
- r = CONVOLVE1x3(s, w[2]);
- pixels[2][0] += r[0];
- pixels[2][1] += r[1];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- for(i = 0; i < 3; i++)
- {
- for(j = 0; j < 2; j++)
- {
- pixels[i][j] += vec4(b);
- }
- }
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- for(i = 0; i < 3; i++)
- {
- pixels[i][0] = ACT_OP(pixels[i][0]);
- pixels[i][1] = ACT_OP(pixels[i][1]);
- }
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK8_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK8_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
-}
-
-#elif defined(PROCESS_4X_1Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#if STRIDE_X == 2
-#define CONVOLVE1x3(s, w) convolve1x3_stride2(s, w)
-#define LOAD_AND_UNPACK(offset) VLOAD3_UNPACK12_HALF(src_ptr, offset)
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
-#define CONVOLVE1x3(s, w) convolve1x3_stride1(s, w)
-#define LOAD_AND_UNPACK(offset) VLOAD2_UNPACK8_HALF(src_ptr, offset)
-#else /* STRIDE_X not equal to 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
-vec4 convolve1x3_stride1(vec4 tmp[2], vec3 w)
-{
- vec4 middle;
- vec4 right;
- vec4 r;
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- r = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
-
-vec4 convolve1x3_stride2(vec4 tmp[3], vec3 w)
-{
- vec4 left;
- vec4 middle;
- vec4 right;
- vec4 r;
-
- left = vec4(tmp[0].xz, tmp[1].xz);
- middle = vec4(tmp[0].yw, tmp[1].yw);
- right = vec4(tmp[0].z, tmp[1].xz, tmp[2].x);
-
- r = left * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels = vec4(0);
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- uvec2 packed_w[3];
-
- packed_w[0] = VLOAD2_CURRENT_ITEM(uvec2, weights_ptr, weights_iter);
- packed_w[1] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- packed_w[2] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec3 w[3];
- w[0] = vec3(unpackHalf2x16(packed_w[0].x), unpackHalf2x16(packed_w[0].y).x);
- w[1] = vec3(unpackHalf2x16(packed_w[1].x), unpackHalf2x16(packed_w[1].y).x);
- w[2] = vec3(unpackHalf2x16(packed_w[2].x), unpackHalf2x16(packed_w[2].y).x);
-
-#if STRIDE_X == 2
- vec4 s[3];
-#elif STRIDE_X == 1 /* STRIDE_X == 1 */
- vec4 s[2];
-#else /* STRIDE_X not equal to 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 2 */
-
- // first line
- s = LOAD_AND_UNPACK(CURRENT_ITEM_OFFSET(src_iter));
- pixels += CONVOLVE1x3(s, w[0]);
-
- // second line
- s = LOAD_AND_UNPACK(IMAGE_OFFSET(src_iter, 0, 1));
- pixels += CONVOLVE1x3(s, w[1]);
-
- // third line
- s = LOAD_AND_UNPACK(IMAGE_OFFSET(src_iter, 0, 2));
- pixels += CONVOLVE1x3(s, w[2]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- pixels += vec4(b);
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels = ACT_OP(pixels);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels);
-}
-
-#elif defined(PROCESS_4X_3Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#define CONVOLVE1x3(s, w) convolve1x3_stride1(s, w)
-
-vec4 convolve1x3_stride1(vec4 tmp[2], vec3 w)
-{
- vec4 middle;
- vec4 right;
- vec4 r;
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- r = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[3];
- int i;
-
- for(i = 0; i < 3; i++)
- {
- pixels[i] = vec4(0);
- }
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- uvec2 packed_w[3];
-
- packed_w[0] = VLOAD2_CURRENT_ITEM(uvec2, weights_ptr, weights_iter);
- packed_w[1] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- packed_w[2] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec3 w[3];
- w[0] = vec3(unpackHalf2x16(packed_w[0].x), unpackHalf2x16(packed_w[0].y).x);
- w[1] = vec3(unpackHalf2x16(packed_w[1].x), unpackHalf2x16(packed_w[1].y).x);
- w[2] = vec3(unpackHalf2x16(packed_w[2].x), unpackHalf2x16(packed_w[2].y).x);
-
- vec4 s[2];
-
- // first line
- s = VLOAD2_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
- pixels[0] += CONVOLVE1x3(s, w[0]);
-
- // second line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- pixels[0] += CONVOLVE1x3(s, w[1]);
- pixels[1] += CONVOLVE1x3(s, w[0]);
-
- // third line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- pixels[0] += CONVOLVE1x3(s, w[2]);
- pixels[1] += CONVOLVE1x3(s, w[1]);
- pixels[2] += CONVOLVE1x3(s, w[0]);
-
- // fourth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
- pixels[1] += CONVOLVE1x3(s, w[2]);
- pixels[2] += CONVOLVE1x3(s, w[1]);
-
- // fifth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 4));
- pixels[2] += CONVOLVE1x3(s, w[2]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- for(i = 0; i < 3; i++)
- {
- pixels[i] += vec4(b);
- }
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels[2] = ACT_OP(pixels[2]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
-}
-
-#elif defined(PROCESS_4X_4Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#define CONVOLVE1x3(s, w) convolve1x3_stride1(s, w)
-
-vec4 convolve1x3_stride1(vec4 tmp[2], vec3 w)
-{
- vec4 middle;
- vec4 right;
- vec4 r;
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- r = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
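-
-/* Same row-reuse idea as the 3Y variants: N output rows of a 3x3 stride-1
- * convolution need N + 2 input rows, so the 4 output rows below read 6 rows.
- */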
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[4];
- int i;
-
- for(i = 0; i < 4; i++)
- {
- pixels[i] = vec4(0);
- }
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- uvec2 packed_w[3];
-
- packed_w[0] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
- packed_w[1] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- packed_w[2] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec3 w[3];
- w[0] = vec3(unpackHalf2x16(packed_w[0].x), unpackHalf2x16(packed_w[0].y).x);
- w[1] = vec3(unpackHalf2x16(packed_w[1].x), unpackHalf2x16(packed_w[1].y).x);
- w[2] = vec3(unpackHalf2x16(packed_w[2].x), unpackHalf2x16(packed_w[2].y).x);
-
- vec4 s[2];
-
- // first line
- s = VLOAD2_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
- pixels[0] += CONVOLVE1x3(s, w[0]);
-
- // second line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- pixels[0] += CONVOLVE1x3(s, w[1]);
- pixels[1] += CONVOLVE1x3(s, w[0]);
-
- // third line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- pixels[0] += CONVOLVE1x3(s, w[2]);
- pixels[1] += CONVOLVE1x3(s, w[1]);
- pixels[2] += CONVOLVE1x3(s, w[0]);
-
- // fourth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
- pixels[1] += CONVOLVE1x3(s, w[2]);
- pixels[2] += CONVOLVE1x3(s, w[1]);
- pixels[3] += CONVOLVE1x3(s, w[0]);
-
- // fifth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 4));
- pixels[2] += CONVOLVE1x3(s, w[2]);
- pixels[3] += CONVOLVE1x3(s, w[1]);
-
- // sixth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 5));
- pixels[3] += CONVOLVE1x3(s, w[2]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- for(i = 0; i < 4; i++)
- {
- pixels[i] += vec4(b);
- }
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels[2] = ACT_OP(pixels[2]);
- pixels[3] = ACT_OP(pixels[3]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 3, 0), pixels[3]);
-}
-#elif defined(PROCESS_4X_3Y_2Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-#define CONVOLVE1x3(s, w) convolve1x3_stride1(s, w)
-
-vec4 convolve1x3_stride1(vec4 tmp[2], vec3 w)
-{
- vec4 middle;
- vec4 right;
- vec4 r;
-
- middle = vec4(tmp[0].yzw, tmp[1].x);
- right = vec4(tmp[0].zw, tmp[1].xy);
-
- r = tmp[0] * w[0] + middle * w[1] + right * w[2];
-
- return r;
-}
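-
-/* 2Z scheme note: each invocation produces 3 output rows for 2 consecutive
- * output feature maps. The source offset is saved once and restored at the
- * start of each z iteration, so the same input window is re-read while the
- * weights iterator keeps advancing into the next slice of the 4th dimension.
- */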
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 pixels[3];
- int i;
-
- uint z_base_index = gl_GlobalInvocationID.z << 1;
-
- // store original src current offset
- uint s_offset_in_bytes = CURRENT_ITEM_OFFSET_IN_BYTES(src_iter);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_base_index * weights_stride_w);
-
- for(int z = 0; z < 2; ++z)
- {
- uint z_index = z_base_index + uint(z);
-
- SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(src_iter, s_offset_in_bytes);
-
- for(i = 0; i < 3; i++)
- {
- pixels[i] = vec4(0);
- }
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- // load 3 weights once
- uvec2 packed_w[3];
-
- packed_w[0] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
- packed_w[1] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- packed_w[2] = VLOAD2(uvec2, weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
-
- vec3 w[3];
- w[0] = vec3(unpackHalf2x16(packed_w[0].x), unpackHalf2x16(packed_w[0].y).x);
- w[1] = vec3(unpackHalf2x16(packed_w[1].x), unpackHalf2x16(packed_w[1].y).x);
- w[2] = vec3(unpackHalf2x16(packed_w[2].x), unpackHalf2x16(packed_w[2].y).x);
-
- vec4 s[2];
-
- // first line
- s = VLOAD2_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
- pixels[0] += CONVOLVE1x3(s, w[0]);
-
- // second line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- pixels[0] += CONVOLVE1x3(s, w[1]);
- pixels[1] += CONVOLVE1x3(s, w[0]);
-
- // third line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- pixels[0] += CONVOLVE1x3(s, w[2]);
- pixels[1] += CONVOLVE1x3(s, w[1]);
- pixels[2] += CONVOLVE1x3(s, w[0]);
-
- // fourth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
- pixels[1] += CONVOLVE1x3(s, w[2]);
- pixels[2] += CONVOLVE1x3(s, w[1]);
-
- // fifth line
- s = VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 4));
- pixels[2] += CONVOLVE1x3(s, w[2]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-
- if(z_index % uint(2) == uint(0))
- {
- b = vec2_b.x;
- }
- else
- {
- b = vec2_b.y;
- }
-
- for(i = 0; i < 3; i++)
- {
- pixels[i] += vec4(b);
- }
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels[0] = ACT_OP(pixels[0]);
- pixels[1] = ACT_OP(pixels[1]);
- pixels[2] = ACT_OP(pixels[2]);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, pixels[0]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 1, 0), pixels[1]);
- STORE_PACK4_HALF(dst_ptr, TENSOR3D_OFFSET(dst_iter, 0, 2, 0), pixels[2]);
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, dst_attrs.stride_z);
- }
-}
-
-#endif /* PROCESS_nX_nY_nZ */
-
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs b/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs
deleted file mode 100644
index e47db549c9..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/direct_convolution5x5.cs
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#ifdef FUSED_ACTIVATION
-#include "activation_layer_helpers_cs.h"
-#endif /* FUSED_ACTIVATION */
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-/** This kernel performs a direct convolution over the lowest three dimensions of the input tensor
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note This kernel has multiple optimized direct convolution options for FP16.
- * The direct convolution option must be passed at compile time using "#define PROCESS_nX_nY_nZ" e.g. "#define PROCESS_4X_1Y_1Z"
- * @note The convolution stride x must be passed at compile time using "#define STRIDE_X n" e.g. "#define STRIDE_X 1"
- * This OpenGL ES shader works with stride_x = 1 and 2
- * @note If biases are used then "#define BIAS" has to be passed at compile time
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr
- * @param[in] weights_attrs The attributes of the weights tensor
- * @param[in] biases_ptr Pointer to the biases tensor. Supported data types: same as @p src_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- * @param[in] weights_stride_w Stride of the weights tensor in the 4th dimension
- * @param[in] weights_depth The third dimension of the weights tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- Tensor3DAttributes weights_attrs;
-#ifdef BIAS
- VectorAttributes biases_attrs;
-#endif /* BIAS */
- uint weights_stride_w;
- uint weights_depth;
-};
-
-#ifdef DATA_TYPE_FP32
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, float, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- float pixels = 0.f;
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- float temp[5];
- float temp_weight[5];
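- // Each iteration below accumulates one input channel: five rows of a
- // 5-tap dot product, after which both iterators advance by one z-plane.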
- for(int d = 0; d < int(weights_depth); ++d)
- {
- temp = VLOAD5(float[5], src_ptr, IMAGE_OFFSET(src_iter, 0, 0));
- temp_weight = VLOAD5(float[5], weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 0, 0));
- pixels += temp[0] * temp_weight[0] + temp[1] * temp_weight[1] + temp[2] * temp_weight[2] + temp[3] * temp_weight[3] + temp[4] * temp_weight[4];
-
- temp = VLOAD5(float[5], src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- temp_weight = VLOAD5(float[5], weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 1, 0));
- pixels += temp[0] * temp_weight[0] + temp[1] * temp_weight[1] + temp[2] * temp_weight[2] + temp[3] * temp_weight[3] + temp[4] * temp_weight[4];
-
- temp = VLOAD5(float[5], src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- temp_weight = VLOAD5(float[5], weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 2, 0));
- pixels += temp[0] * temp_weight[0] + temp[1] * temp_weight[1] + temp[2] * temp_weight[2] + temp[3] * temp_weight[3] + temp[4] * temp_weight[4];
-
- temp = VLOAD5(float[5], src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
- temp_weight = VLOAD5(float[5], weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 3, 0));
- pixels += temp[0] * temp_weight[0] + temp[1] * temp_weight[1] + temp[2] * temp_weight[2] + temp[3] * temp_weight[3] + temp[4] * temp_weight[4];
-
- temp = VLOAD5(float[5], src_ptr, IMAGE_OFFSET(src_iter, 0, 4));
- temp_weight = VLOAD5(float[5], weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, 4, 0));
- pixels += temp[0] * temp_weight[0] + temp[1] * temp_weight[1] + temp[2] * temp_weight[2] + temp[3] * temp_weight[3] + temp[4] * temp_weight[4];
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- pixels += LOAD(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- pixels = ACT_OP(pixels);
-#endif /* FUSED_ACTIVATION */
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, pixels);
-}
-#elif defined(DATA_TYPE_FP16)
-
-// Common definitions for DATA_TYPE_FP16
-#if STRIDE_X == 1
-#define LOAD_SRC_AT_ROW(row) VLOAD2_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, row))
-#define CONVOLVE1x5(src, weight) convolve1x5_stride1(src, weight)
-#elif STRIDE_X == 2 /* STRIDE_X == 1 */
-#define LOAD_SRC_AT_ROW(row) VLOAD3_UNPACK12_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, row))
-#define CONVOLVE1x5(src, weight) convolve1x5_stride2(src, weight)
-#else /* STRIDE_X not equal to 1 or 2 */
-#error STRIDE_X larger than 2 is not supported
-#endif /* STRIDE_X == 1 */
-
-#define LOAD_WEIGHT_AT_ROW(row) VLOAD3_UNPACK6_HALF(weights_ptr, TENSOR3D_OFFSET(weights_iter, 0, row, 0))
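-
-/* Load-width note: VLOAD3_UNPACK6_HALF fetches 6 packed halves per weight row,
- * of which the first 5 are the 1x5 taps (w[2].y is unused). On the source
- * side, 4 outputs need 4 + 5 - 1 = 8 inputs at stride 1 (VLOAD2_UNPACK8) and
- * 2 * 3 + 5 = 11 inputs at stride 2, rounded up to the 12 provided by
- * VLOAD3_UNPACK12.
- */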
-
-vec4 convolve1x5_stride1(vec4 tmp[2], vec2 w[3])
-{
- vec4 src0 = tmp[0];
- vec4 src1 = vec4(tmp[0].yzw, tmp[1].x);
- vec4 src2 = vec4(tmp[0].zw, tmp[1].xy);
- vec4 src3 = vec4(tmp[0].w, tmp[1].xyz);
- vec4 src4 = tmp[1];
- vec4 ret = src0 * w[0].x + src1 * w[0].y + src2 * w[1].x + src3 * w[1].y + src4 * w[2].x;
-
- return ret;
-}
-
-vec4 convolve1x5_stride2(vec4 tmp[3], vec2 w[3])
-{
- vec4 src0 = vec4(tmp[0].xz, tmp[1].xz);
- vec4 src1 = vec4(tmp[0].yw, tmp[1].yw);
- vec4 src2 = vec4(tmp[0].z, tmp[1].xz, tmp[2].x);
- vec4 src3 = vec4(tmp[0].w, tmp[1].yw, tmp[2].y);
- vec4 src4 = vec4(tmp[1].x, tmp[1].z, tmp[2].xz);
- vec4 ret = src0 * w[0].x + src1 * w[0].y + src2 * w[1].x + src3 * w[1].y + src4 * w[2].x;
-
- return ret;
-}
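-
-/* Explanatory tap map for the stride-2 path above: output lane o reads inputs
- * 2o .. 2o + 4, so src0..src4 gather elements {0,2,4,6}, {1,3,5,7}, {2,4,6,8},
- * {3,5,7,9} and {4,6,8,10} respectively via the xz/yw swizzles.
- */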
-
-#if defined(PROCESS_4X_1Y_1Z)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, weightsBuffer, uint, weights_ptr, weights_shift, 2, readonly);
-#ifdef BIAS
-TENSOR_DECLARATION(4, biasesBuffer, uint, biases_ptr, biases_shift, 2, readonly);
-#endif /* BIAS */
-
-void main()
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator weights_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(weights_attrs, weights_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
-#ifdef BIAS
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR_NO_STEP(biases_attrs, biases_shift);
-#endif /* BIAS */
-
- vec4 res = vec4(0);
- vec2 w[3];
- vec4 s[STRIDE_X + 1];
-
- uint z_index = gl_GlobalInvocationID.z;
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, z_index * weights_stride_w);
-
- for(int d = 0; d < int(weights_depth); ++d)
- {
- for(int row = 0; row < 5; row++)
- {
- w = LOAD_WEIGHT_AT_ROW(row);
- s = LOAD_SRC_AT_ROW(row);
- res += CONVOLVE1x5(s, w);
- }
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, src_attrs.stride_z);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(weights_iter, weights_attrs.stride_z);
- }
-
-#ifdef BIAS
- vec2 vec2_b;
- float b;
-
- vec2_b = LOAD_UNPACK2_HALF(biases_ptr, VECTOR_OFFSET(biases_iter, z_index));
- b = (z_index % uint(2) == uint(0)) ? vec2_b.x : vec2_b.y;
- res += vec4(b);
-#endif /* BIAS */
-
-#ifdef FUSED_ACTIVATION
- res = ACT_OP(res);
-#endif /* FUSED_ACTIVATION */
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, res);
-}
-
-#endif /* PROCESS_nX_nY_nZ */
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/dropout.cs b/src/core/GLES_COMPUTE/cs_shaders/dropout.cs
deleted file mode 100644
index 89ac8fea2e..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/dropout.cs
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif /*DATA_TYPE_FP16*/
-
-uint hash(uint x)
-{
- x += (x << 10u);
- x ^= (x >> 6u);
- x += (x << 3u);
- x ^= (x >> 11u);
- x += (x << 15u);
- return x;
-}
-
-uint hash(uvec3 v)
-{
- return hash(v.x ^ hash(v.y) ^ hash(v.z));
-}
-
-float float_construct(uint m)
-{
- const uint ieee_mantissa = 0x007FFFFFu;
- const uint ieee_one = 0x3F800000u;
-
- m &= ieee_mantissa;
- m |= ieee_one;
-
- float f = uintBitsToFloat(m);
- return f - 1.0;
-}
-
-float rand(vec3 v, float seed)
-{
- return float_construct(hash(floatBitsToUint(v + seed)));
-}
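-
-/* PRNG note: hash() is a stateless integer scramble of the invocation
- * coordinates; float_construct() keeps the low 23 bits as a mantissa, ORs in
- * the exponent of 1.0 (0x3F800000) to build a float in [1, 2), then subtracts
- * 1.0 to return a uniform value in [0, 1). E.g. m == 0 yields 0.0 and
- * m == 0x007FFFFF yields the largest representable value below 1.0.
- */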
-
-/** Dropout is used to reduce over-fitting in neural networks.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[in,out] mask_ptr Pointer to the mask tensor. Supported data types: same as @p src_ptr
- * @param[in] mask_attrs The attributes of the mask tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes mask_attrs;
- Tensor3DAttributes dst_attrs;
-};
-
-#ifdef DATA_TYPE_FP32
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, maskBuffer, float, mask_ptr, mask_shift, 2, restrict);
-TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator mask_iter = CONVERT_TO_TENSOR3D_ITERATOR(mask_attrs, mask_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float random = 0.f;
- float inputv = 0.f;
- float maskv = 0.f;
- float outputv = 0.f;
-
-#ifdef FORWARD
- random = rand(vec3(gl_GlobalInvocationID.xyz), SEED);
- maskv = (random > RATIO) ? 1.f : 0.f;
- STORE_CURRENT_ITEM(mask_ptr, mask_iter, maskv);
-#else /* FORWARD */
- maskv = LOAD_CURRENT_ITEM(mask_ptr, mask_iter);
-#endif /* FORWARD */
-
- inputv = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- outputv = maskv * inputv * float(SCALE);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, outputv);
-}
-
-#elif defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, maskBuffer, uint, mask_ptr, mask_shift, 2, restrict);
-TENSOR_DECLARATION(3, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator mask_iter = CONVERT_TO_TENSOR3D_ITERATOR(mask_attrs, mask_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float random1 = 0.f;
- float random2 = 0.f;
- vec2 input_vec = vec2(0, 0);
- vec2 output_vec = vec2(0, 0);
- vec2 mask_vec = vec2(0, 0);
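-
- // Two random numbers are generated because each uint element packs two
- // FP16 values; offsetting x by 0.5 gives the second lane a distinct hash.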
-
-#ifdef FORWARD
- random1 = rand(vec3(gl_GlobalInvocationID.xyz), SEED);
- random2 = rand(vec3(float(gl_GlobalInvocationID.x) + 0.5f, gl_GlobalInvocationID.yz), SEED);
- mask_vec.x = (random1 > RATIO) ? 1.f : 0.f;
- mask_vec.y = (random2 > RATIO) ? 1.f : 0.f;
-
- STORE_PACK2_CURRENT_ITEM_HALF(mask_ptr, mask_iter, mask_vec);
-#else /* FORWARD */
- mask_vec = LOAD_UNPACK2_CURRENT_ITEM_HALF(mask_ptr, mask_iter);
-#endif /* FORWARD */
-
- input_vec = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- output_vec = mask_vec * input_vec * float(SCALE);
-
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, output_vec);
-}
-
-#else /* DATA_TYPE_FP32 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/fill_border.cs b/src/core/GLES_COMPUTE/cs_shaders/fill_border.cs
deleted file mode 100644
index 4e96a5ec74..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/fill_border.cs
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-#ifdef FILL_IMAGE_BORDERS_REPLICATE
-
-/** Fill N pixels of the padding edge of a single-channel image by replicating the closest valid pixel.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @attention The border size for top, bottom, left, right needs to be passed at compile time.
- * e.g. BORDER_SIZE_TOP=0 BORDER_SIZE_BOTTOM=2 BORDER_SIZE_LEFT=0 BORDER_SIZE_RIGHT=2
- *
- * @param[in,out] buf_ptr Pointer to the source image. Supported data types: F16/F32
- * @param[in] buf_attrs The attributes of the source image
- * @param[in] width Width of the valid region of the image
- * @param[in] height Height of the valid region of the image
- * @param[in] start_pos_x X coordinate indicating the start point of the valid region
- * @param[in] start_pos_y Y coordinate indicating the start point of the valid region
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes buf_attrs;
- uint width;
- uint height;
- int start_pos_x;
- int start_pos_y;
-};
-
-#if defined(DATA_TYPE_FP32)
-
-TENSOR_DECLARATION(1, bufBuffer, float, buf_ptr, buf_shift, 2, restrict);
-
-void main()
-{
- ImageIterator buf_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(buf_attrs, buf_shift);
-
- // Update pointer to point to the starting point of the valid region
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(buf_iter, start_pos_y * int(buf_attrs.stride_y) + start_pos_x * int(buf_attrs.stride_x));
-
- int total_width = BORDER_SIZE_LEFT + int(width) + BORDER_SIZE_RIGHT;
- int gid0 = int(gl_GlobalInvocationID.x);
- int gidH = gid0 - total_width;
- int gidW = gid0 - BORDER_SIZE_LEFT;
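-
- // Work split: invocations with gidH >= 0 each replicate one row of the
- // left/right borders; the remaining invocations handle one column of the
- // padded width each, replicating the top/bottom borders.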
-
- if(gidH >= 0)
- {
- // Handle left border
- float left_val = LOAD(buf_ptr, IMAGE_OFFSET(buf_iter, 0, gidH));
- for(int i = 0; i < BORDER_SIZE_LEFT; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, -(i + 1), gidH), left_val);
- }
- // Handle right border
- float right_val = LOAD(buf_ptr, IMAGE_OFFSET(buf_iter, int(width) - 1, gidH));
- for(int i = 0; i < BORDER_SIZE_RIGHT; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, int(width) + i, gidH), right_val);
- }
- }
- else
- {
- // Get value for corners
- int val_idx = gidW;
- if(gidW < 0 || gidW > (int(width) - 1))
- {
- val_idx = gidW < 0 ? 0 : int(width) - 1;
- }
-
- // Handle top border
- float top_val = LOAD(buf_ptr, IMAGE_OFFSET(buf_iter, val_idx, 0));
- for(int i = 0; i < BORDER_SIZE_TOP; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, gidW, -(i + 1)), top_val);
- }
- // Handle bottom border
- float bottom_val = LOAD(buf_ptr, IMAGE_OFFSET(buf_iter, val_idx, int(height) - 1));
- for(int i = 0; i < BORDER_SIZE_BOTTOM; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, gidW, int(height) + i), bottom_val);
- }
- }
-}
-#elif defined(DATA_TYPE_FP16)
-
-TENSOR_DECLARATION(1, bufBuffer, uint, buf_ptr, buf_shift, 2, restrict);
-
-void set_replicate(uint offset, int pos, vec2 replicate_value)
-{
- vec2 b = LOAD_UNPACK2_HALF(buf_ptr, offset);
-
- if(pos % 2 == 0)
- {
- b.x = replicate_value.y;
- }
- else
- {
- b.y = replicate_value.x;
- }
-
- STORE_PACK2_HALF(buf_ptr, offset, b);
-}
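-
-/* A read-modify-write is required here because one uint element stores two
- * packed FP16 pixels: the pair is unpacked, the lane selected by the parity
- * of pos is overwritten, and the pair is repacked.
- */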
-
-void main()
-{
- ImageIterator buf_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(buf_attrs, buf_shift);
-
- // Update pointer to point to the starting point of the valid region
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(buf_iter, start_pos_y * int(buf_attrs.stride_y) + start_pos_x * int(buf_attrs.stride_x));
-
- int total_width = BORDER_SIZE_LEFT + int(width) + BORDER_SIZE_RIGHT;
- int gid0 = int(gl_GlobalInvocationID.x);
- int gidH = gid0 - total_width;
- int gidW = gid0 - BORDER_SIZE_LEFT;
-
- if(gidH >= 0)
- {
- // Handle left border
- vec2 left_val = LOAD_UNPACK2_HALF(buf_ptr, IMAGE_OFFSET(buf_iter, 0, gidH));
- for(int i = 0; i < BORDER_SIZE_LEFT; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, -(i + 1), gidH);
- int pos = BORDER_SIZE_LEFT - i - 1;
- if(i == 0)
- {
- if(pos % 2 == 0)
- {
- set_replicate(offset, pos, left_val);
- }
- }
- else
- {
- if(pos % 2 == 0)
- {
- if(BORDER_SIZE_LEFT % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, left_val.xx);
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, left_val.yy);
- }
- i++;
- }
- }
- }
- // Handle right border
- vec2 right_val_origin = LOAD_UNPACK2_HALF(buf_ptr, IMAGE_OFFSET(buf_iter, int(width) - 1, gidH));
- vec2 right_val;
- if((((BORDER_SIZE_LEFT + int(width)) % 2)) == 1)
- {
- right_val = vec2(right_val_origin.x, right_val_origin.x);
- }
- else
- {
- right_val = vec2(right_val_origin.y, right_val_origin.y);
- }
- for(int i = 0; i < BORDER_SIZE_RIGHT; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, int(width) + i, gidH);
- int pos = i + BORDER_SIZE_LEFT + int(width);
-
- if(i == 0)
- {
- if(pos % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, right_val);
- i++;
- }
- else
- {
- set_replicate(offset, pos, right_val);
- }
- }
- else
- {
- if(pos % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, right_val);
- i++;
- }
- }
- }
- }
- else
- {
- // Get value for corners
- int val_idx = gidW;
- if(gidW < 0 || (gidW > (int(width) - 1)))
- {
- val_idx = gidW < 0 ? 0 : (int(width) - 1);
- }
-
- // Handle top border
- vec2 top_val = LOAD_UNPACK2_HALF(buf_ptr, IMAGE_OFFSET(buf_iter, val_idx, 0));
- for(int i = 0; i < BORDER_SIZE_TOP; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, gidW, -(i + 1));
-
- if(gid0 % 2 == 0)
- {
- if(gidW == (int(width) - 1))
- {
- if(((BORDER_SIZE_LEFT + int(width)) % 2 == 1))
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val.xx);
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val.yy);
- }
- }
- else
- {
- if(gidW < 0)
- {
- if(BORDER_SIZE_LEFT % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val.xx);
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val.yy);
- }
- }
- else if(gidW >= int(width))
- {
- if((BORDER_SIZE_LEFT + int(width)) % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val.yy);
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val.xx);
- }
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, top_val);
- }
- }
- }
- }
- // Handle bottom border
- vec2 bottom_val = LOAD_UNPACK2_HALF(buf_ptr, IMAGE_OFFSET(buf_iter, val_idx, int(height) - 1));
- for(int i = 0; i < BORDER_SIZE_BOTTOM; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, gidW, int(height) + i);
-
- if(gid0 % 2 == 0)
- {
- if(gidW == (int(width) - 1))
- {
- STORE_PACK2_HALF(buf_ptr, offset, bottom_val.xx);
- }
- else
- {
- if(gidW < 0)
- {
- if(BORDER_SIZE_LEFT % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, bottom_val.xx);
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, bottom_val.yy);
- }
- }
- else if(gidW >= int(width))
- {
- if((BORDER_SIZE_LEFT + int(width)) % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, bottom_val.yy);
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, bottom_val.xx);
- }
- }
- else
- {
- STORE_PACK2_HALF(buf_ptr, offset, bottom_val);
- }
- }
- }
- }
- }
-}
-
-#endif /* DATA_TYPE_FP32 */
-
-#endif /* FILL_IMAGE_BORDERS_REPLICATE */
-
-#ifdef FILL_IMAGE_BORDERS_CONSTANT
-
-/** Fill N pixels of the padding edge of a single-channel image with a constant value.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @attention The border size for top, bottom, left, right needs to be passed at compile time.
- * e.g. BORDER_SIZE_TOP=0 BORDER_SIZE_BOTTOM=2 BORDER_SIZE_LEFT=0 BORDER_SIZE_RIGHT=2
- *
- * @param[out] buf_ptr Pointer to the source image. Supported data types: F16/F32
- * @param[in] buf_attrs The attributes of the source image
- * @param[in] width Width of the valid region of the image
- * @param[in] height Height of the valid region of the image
- * @param[in] start_pos_x X coordinate indicating the start point of the valid region
- * @param[in] start_pos_y Y coordinate indicating the start point of the valid region
- * @param[in] constant_value Constant value to use to fill the edges
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes buf_attrs;
- uint width;
- uint height;
- int start_pos_x;
- int start_pos_y;
- float constant_value;
-};
-
-#if defined(DATA_TYPE_FP32)
-TENSOR_DECLARATION(1, bufBuffer, float, buf_ptr, buf_shift, 2, writeonly);
-
-void main()
-{
- ImageIterator buf_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(buf_attrs, buf_shift);
-
- // Update pointer to point to the starting point of the valid region
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(buf_iter, start_pos_y * int(buf_attrs.stride_y) + start_pos_x * int(buf_attrs.stride_x));
-
- int total_width = BORDER_SIZE_LEFT + int(width) + BORDER_SIZE_RIGHT;
- int gid0 = int(gl_GlobalInvocationID.x);
- int gidH = gid0 - total_width;
- int gidW = gid0 - BORDER_SIZE_LEFT;
-
- if(gidH >= 0)
- {
- // Handle left border
- for(int i = 0; i < BORDER_SIZE_LEFT; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, -(i + 1), gidH), constant_value);
- }
- // Handle right border
- for(int i = 0; i < BORDER_SIZE_RIGHT; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, int(width) + i, gidH), constant_value);
- }
- }
- else
- {
- // Handle top border
- for(int i = 0; i < BORDER_SIZE_TOP; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, gidW, -(i + 1)), constant_value);
- }
- // Handle bottom border
- for(int i = 0; i < BORDER_SIZE_BOTTOM; ++i)
- {
- STORE(buf_ptr, IMAGE_OFFSET(buf_iter, gidW, int(height) + i), constant_value);
- }
- }
-}
-
-#elif defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, bufBuffer, uint, buf_ptr, buf_shift, 2, restrict);
-
-void set_constant(uint offset, int pos)
-{
- vec2 b = LOAD_UNPACK2_HALF(buf_ptr, offset);
-
- if(pos % 2 == 0)
- {
- b.x = constant_value;
- }
- else
- {
- b.y = constant_value;
- }
-
- STORE_PACK2_HALF(buf_ptr, offset, b);
-}
-
-void main()
-{
- ImageIterator buf_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(buf_attrs, buf_shift);
-
- int total_width = BORDER_SIZE_LEFT + int(width) + BORDER_SIZE_RIGHT;
- int gid0 = int(gl_GlobalInvocationID.x);
- int gidH = gid0 - total_width;
- int gidW = gid0 - BORDER_SIZE_LEFT;
-
- // Update pointer to point to the starting point of the valid region
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(buf_iter, start_pos_y * int(buf_attrs.stride_y) + start_pos_x * int(buf_attrs.stride_x));
-
- vec2 b = vec2(constant_value, constant_value);
-
- if(gidH >= 0)
- {
- // Handle left border
- for(int i = 0; i < BORDER_SIZE_LEFT; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, -(i + 1), gidH);
- int pos = BORDER_SIZE_LEFT - i - 1;
-
- if(i == 0)
- {
- if(pos % 2 == 0)
- {
- set_constant(offset, pos);
- }
- }
- else
- {
- if(pos % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, b);
- }
- }
- }
- // Handle right border
- for(int i = 0; i < BORDER_SIZE_RIGHT; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, int(width) + i, gidH);
- int pos = i + BORDER_SIZE_LEFT + int(width);
-
- if(i == 0)
- {
- if(pos % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, b);
- }
- else
- {
- set_constant(offset, pos);
- }
- }
- else
- {
- if(pos % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, b);
- }
- }
- }
- }
- else
- {
- // Handle top border
- for(int i = 0; i < BORDER_SIZE_TOP; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, gidW, -(i + 1));
-
- if(gid0 % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, b);
- }
- }
- // Handle bottom border
- for(int i = 0; i < BORDER_SIZE_BOTTOM; ++i)
- {
- uint offset = IMAGE_OFFSET(buf_iter, gidW, int(height) + i);
-
- if(gid0 % 2 == 0)
- {
- STORE_PACK2_HALF(buf_ptr, offset, b);
- }
- }
- }
-}
-
-#endif /* DATA_TYPE_FP32 */
-
-#endif /* FILL_IMAGE_BORDERS_CONSTANT */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/gemm.cs b/src/core/GLES_COMPUTE/cs_shaders/gemm.cs
deleted file mode 100644
index d41b48c2a7..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/gemm.cs
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-#if defined(DATA_TYPE_FP32)
-#ifdef GEMM_TRANSPOSE1xW
-/** This OpenGL ES kernel computes the "vector" 1x4 transposition of the input matrix
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src_attrs The attributes of the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- /* Compute address for Matrix B - source */
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- /* Compute address for Matrix B transposed - destination. X and Y are swapped */
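- /* Each invocation copies one 1x4 block: the block read at (x, y) of the
- source lands at destination row x, byte offset y * 16 (4 floats of
- 4 bytes each), which swaps the roles of x and y. */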
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, gl_GlobalInvocationID.y * uint(16) + gl_GlobalInvocationID.x * dst_attrs.stride_y);
-
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src_ptr, src_iter);
- VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, b0);
-}
-#endif /* GEMM_TRANSPOSE1xW */
-
-#ifdef GEMM_INTERLEAVE4x4
-/** This OpenGL ES kernel reshapes the input matrix by interleaving its values
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src_attrs The attributes of the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- /* Compute source and destination addresses */
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- int i;
- int j;
-
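- /* Interleave mapping: source element (x = i, y = j) of the 4x4 tile is
- written to linear destination offset i * 4 + j, i.e. each source column
- becomes four consecutive destination elements. */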
- for(i = 0; i < 4; ++i)
- {
- for(j = 0; j < 4; ++j)
- {
- float res = LOAD(src_ptr, IMAGE_OFFSET(src_iter, i, j));
- STORE(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, (i * 4 + j)), res);
- }
- }
-}
-#endif /* GEMM_INTERLEAVE4x4 */
-
-#ifdef GEMM_ACCUMULATE_BIASES
-/** This kernel accumulates each row with the biases vector
- *
- * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: F32
- * @param[in] accum_attrs The attributes of the accumulate tensor
- * @param[in] biases_ptr Pointer to the biases vector. Supported data types: same as @p accum_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes accum_attrs;
- VectorAttributes biases_attrs;
-};
-TENSOR_DECLARATION(1, accumBuffer, float, accum_ptr, accum_shift, 2, restrict);
-TENSOR_DECLARATION(2, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);
-
-void main(void)
-{
- ImageIterator accum_iter = CONVERT_TO_IMAGE_ITERATOR(accum_attrs, accum_shift);
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR(biases_attrs, biases_shift);
-
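-    /* Each invocation adds 16 consecutive bias values to 16 consecutive elements of its row */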
- for(int i = 0; i < 16; ++i)
- {
- float accum_value = LOAD(accum_ptr, TENSOR_OFFSET_ADVANCE(accum_iter, i));
- float biases_value = LOAD(biases_ptr, TENSOR_OFFSET_ADVANCE(biases_iter, i));
- accum_value = biases_value + accum_value;
-
-        // Store the result in the accumulate buffer
- STORE(accum_ptr, TENSOR_OFFSET_ADVANCE(accum_iter, i), accum_value);
- }
-}
-#endif /* GEMM_ACCUMULATE_BIASES */
-
-#ifdef GEMM_MM_INTERLEAVED_TRANSPOSED /* not validated */
-/** This OpenGL ES kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
- *
- * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_attrs The attributes of the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src0_attrs;
- ImageAttributes src1_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, src0Buffer, float, src0_ptr, src0_shift, 2, readonly);
-TENSOR_DECLARATION(2, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- /* Compute address for matrix A and B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(gl_GlobalInvocationID.x) * (src1_attrs.stride_y));
- /* Compute end row address for matrix B */
- int end_row_mtx_b = int(TENSOR_OFFSET_ADVANCE(src1_iter, COLS_B));
-
- /* Reset accumulators */
- vec4 c00 = vec4(0.0f);
- vec4 c10 = vec4(0.0f);
- vec4 c20 = vec4(0.0f);
- vec4 c30 = vec4(0.0f);
-
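-    /* c00..c30 each accumulate one row of the 4x4 output block computed by this workitem */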
- // FIXME: loop unrolling really needed for GLES?
- for(; int(CURRENT_ITEM_OFFSET(src1_iter)) <= (end_row_mtx_b - 8); TENSOR_ITERATOR_ADVANCE(src0_iter, 8), TENSOR_ITERATOR_ADVANCE(src1_iter, 8))
- {
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
- vec4 a0 = VLOAD4_CURRENT_ITEM(vec4, src0_ptr, src0_iter);
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
-
- c00 += vec4(a0.x) * b0;
- c10 += vec4(a0.y) * b0;
- c20 += vec4(a0.z) * b0;
- c30 += vec4(a0.w) * b0;
-
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
- a0 = VLOAD4(vec4, src0_ptr, TENSOR_OFFSET_ADVANCE(src0_iter, 4));
- b0 = VLOAD4(vec4, src1_ptr, TENSOR_OFFSET_ADVANCE(src1_iter, 4));
-
- c00 += vec4(a0.x) * b0;
- c10 += vec4(a0.y) * b0;
- c20 += vec4(a0.z) * b0;
- c30 += vec4(a0.w) * b0;
- }
-
- for(; int(CURRENT_ITEM_OFFSET(src1_iter)) < end_row_mtx_b; TENSOR_ITERATOR_ADVANCE(src0_iter, 4), TENSOR_ITERATOR_ADVANCE(src1_iter, 4))
- {
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
- vec4 a0 = VLOAD4_CURRENT_ITEM(vec4, src0_ptr, src0_iter);
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
-
- c00 += vec4(a0.x) * b0;
- c10 += vec4(a0.y) * b0;
- c20 += vec4(a0.z) * b0;
- c30 += vec4(a0.w) * b0;
- }
-
- /* Multiply by the weight of matrix product */
- c00 = c00 * vec4(ALPHA);
- c10 = c10 * vec4(ALPHA);
- c20 = c20 * vec4(ALPHA);
- c30 = c30 * vec4(ALPHA);
-
- /* Store 4x4 block */
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), c00);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), c10);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), c20);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), c30);
-}
-#endif /* GEMM_MM_INTERLEAVED_TRANSPOSED */
-
-#ifdef GEMM_MM_FLOATING_POINT
-/** This OpenGL ES kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * It is used when neither matrix A nor matrix B has been reshaped before running the matrix multiplication
- *
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y.
- * @note The number of matrix A columns must be passed at compile time using -DCOLS_A.
- * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_attrs The attributes of the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src0_attrs;
- ImageAttributes src1_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, src0Buffer, float, src0_ptr, src0_shift, 2, readonly);
-TENSOR_DECLARATION(2, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
- /* Compute the address for the vector A and matrix B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y) * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, idx * 4);
-
- /* Compute end row address for matrix A */
- int end_row_vec_a = int(TENSOR_OFFSET_ADVANCE_IN_BYTES(src0_iter, COLS_A * 4));
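-    /* COLS_A * 4 converts the number of A columns to bytes (4 bytes per F32 element) */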
-
- /* Reset accumulators */
- vec4 acc0 = vec4(0.0f);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 acc1 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 acc2 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 acc3 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- for(; int(CURRENT_ITEM_OFFSET(src0_iter)) <= (end_row_vec_a - 2); TENSOR_ITERATOR_ADVANCE(src0_iter, 2), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(2) * src1_attrs.stride_y))
- {
- vec2 a0 = VLOAD2_CURRENT_ITEM(vec2, src0_ptr, src0_iter);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec2 a1 = VLOAD2(vec2, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec2 a2 = VLOAD2(vec2, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec2 a3 = VLOAD2(vec2, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
- vec4 b1 = VLOAD4(vec4, src1_ptr, IMAGE_OFFSET(src1_iter, 0, 1));
-
- acc0 += b0 * vec4(a0.x);
- acc0 += b1 * vec4(a0.y);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1.x);
- acc1 += b1 * vec4(a1.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2.x);
- acc2 += b1 * vec4(a2.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3.x);
- acc3 += b1 * vec4(a3.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- }
-
- for(; int(CURRENT_ITEM_OFFSET(src0_iter)) < end_row_vec_a; TENSOR_ITERATOR_ADVANCE(src0_iter, 1), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y))
- {
- // Load values from matrix A
- float a0 = LOAD_CURRENT_ITEM(src0_ptr, src0_iter);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- float a1 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- float a2 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- float a3 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
-
- acc0 += b0 * vec4(a0);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- }
-
- /* Multiply by the weight of vector-matrix product */
- acc0 = acc0 * vec4(ALPHA);
- VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, acc0);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 = acc1 * vec4(ALPHA);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 = acc2 * vec4(ALPHA);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 = acc3 * vec4(ALPHA);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-}
-#endif /* GEMM_MM_FLOATING_POINT */
-
-#ifdef GEMM_MM_FLOATING_POINT_BIFROST
-/** This OpenGL ES kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * It is used when neither matrix A nor matrix B has been reshaped
- *
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DNUM_ELEMS_PROCESSED_PER_THREAD_X and -DNUM_ELEMS_PROCESSED_PER_THREAD_Y.
- * @note The number of matrix A columns must be passed at compile time using -DCOLS_A.
- * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src0_attrs The attributes of the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src0_attrs;
- ImageAttributes src1_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, src0Buffer, float, src0_ptr, src0_shift, 2, readonly);
-TENSOR_DECLARATION(2, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
- /* Compute the address for the vector A and matrix B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y) * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, idx * 4);
-
- /* Reset accumulators */
- vec4 acc0 = vec4(0.0f);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 acc1 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 acc2 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 acc3 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- // A and B src indices get incremented at the same time.
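-    // The loop below is manually unrolled by a factor of 4: each iteration consumes 4 columns of A and 4 rows of B.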
- int i = 0;
- for(; i <= (COLS_A - 4); i += 4)
- {
- // Load values from matrix A and matrix B
- vec4 a0 = VLOAD4_CURRENT_ITEM(vec4, src0_ptr, src0_iter);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 a1 = VLOAD4(vec4, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 a2 = VLOAD4(vec4, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 a3 = VLOAD4(vec4, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y);
-
- // Multiply and accumulate
- acc0 += b0 * vec4(a0.x);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1.x);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2.x);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3.x);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- // Load values from matrix B
- b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y);
-
- // Multiply and accumulate
- acc0 += b0 * vec4(a0.y);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- // Load values from matrix B
- b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y);
-
- // Multiply and accumulate
- acc0 += b0 * vec4(a0.z);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1.z);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2.z);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3.z);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- // Load values from matrix B
- b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y);
-
- // Multiply and accumulate
- acc0 += b0 * vec4(a0.w);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1.w);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2.w);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3.w);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- TENSOR_ITERATOR_ADVANCE(src0_iter, 4);
- }
-
- for(; i < COLS_A; ++i)
- {
- // Load values from matrix A
- float a0 = LOAD_CURRENT_ITEM(src0_ptr, src0_iter);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- float a1 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- float a2 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- float a3 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
-
- // Multiply and accumulate
- acc0 += b0 * vec4(a0);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y);
- TENSOR_ITERATOR_ADVANCE(src0_iter, 1);
- }
-
- /* Multiply by the weight of vector-matrix product */
- acc0 = acc0 * vec4(ALPHA);
- VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, acc0);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 = acc1 * vec4(ALPHA);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 = acc2 * vec4(ALPHA);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 = acc3 * vec4(ALPHA);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-}
-#endif /* GEMM_MM_FLOATING_POINT_BIFROST */
-
-#ifdef GEMM_MATRIXADDITION
-/** This OpenGL ES kernel performs the in-place matrix addition between two matrices, with the second matrix weighted by the scalar value beta
- *
- * @attention The value of beta needs to be passed at compile time using -DBETA=beta
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32
- * @param[in] src_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, restrict);
-
-void main(void)
-{
- /* Compute source and destination addresses */
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- /* Load values from A x B */
- vec4 alpha_ab = VLOAD4_CURRENT_ITEM(vec4, dst_ptr, dst_iter);
- vec4 c = VLOAD4_CURRENT_ITEM(vec4, src_ptr, src_iter);
-
-    /* Compute alpha * (A * B) + beta * C */
- vec4 out1 = alpha_ab + vec4(float(BETA) * c);
-
- /* Store final result in axb matrix */
- VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, out1);
-}
-#endif /* GEMM_MATRIXADDITION */
-
-#elif defined(DATA_TYPE_FP16)
-
-#ifdef GEMM_TRANSPOSE1xW
-/** This OpenGL ES kernel computes the "vector" 1x8 transposition of input matrix
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main(void)
-{
- /* Compute address for Matrix B - source */
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- /* Compute address for Matrix B transposed - destination. X and Y are swapped */
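-    /* uint(16) is the destination x-step in bytes: 8 F16 elements * 2 bytes, i.e. one uvec4 per workitem */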
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, gl_GlobalInvocationID.y * uint(16) + gl_GlobalInvocationID.x * dst_attrs.stride_y);
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD_CURRENT_ITEM(src_ptr, src_iter));
-}
-#endif /* GEMM_TRANSPOSE1xW */
-
-#ifdef GEMM_INTERLEAVE4x4
-/** This OpenGL ES kernel reshapes the input matrix by interleaving the values
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main(void)
-{
- /* Compute source and destination addresses */
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- vec4 s0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
- vec4 s1[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- vec4 s2[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- vec4 s3[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
-
- vec4 s[2];
- s[0] = vec4(s0[0].x, s1[0].x, s2[0].x, s3[0].x);
- s[1] = vec4(s0[0].y, s1[0].y, s2[0].y, s3[0].y);
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, s);
-
- s[0] = vec4(s0[0].z, s1[0].z, s2[0].z, s3[0].z);
- s[1] = vec4(s0[0].w, s1[0].w, s2[0].w, s3[0].w);
- STORE_PACK8_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, 1u), s);
-
- s[0] = vec4(s0[1].x, s1[1].x, s2[1].x, s3[1].x);
- s[1] = vec4(s0[1].y, s1[1].y, s2[1].y, s3[1].y);
- STORE_PACK8_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, 2u), s);
-
- s[0] = vec4(s0[1].z, s1[1].z, s2[1].z, s3[1].z);
- s[1] = vec4(s0[1].w, s1[1].w, s2[1].w, s3[1].w);
- STORE_PACK8_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, 3u), s);
-}
-#endif /* GEMM_INTERLEAVE4x4 */
-
-#ifdef GEMM_MM_FLOATING_POINT
-/** This OpenGL ES kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * It is used when neither matrix A nor matrix B has been reshaped before running the matrix multiplication
- *
- * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha
- *
- * @param[in]  src0_ptr   Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_attrs The attributes of the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src0_attrs;
- ImageAttributes src1_attrs;
- ImageAttributes dst_attrs;
-};
-
-#if defined(MM_PROCESS_4X)
-TENSOR_DECLARATION(1, src0Buffer, uint, src0_ptr, src0_shift, 2, readonly);
-TENSOR_DECLARATION(2, src1Buffer, uvec2, src1_ptr, src1_shift, 3, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
- /* Compute the address for the vector A and matrix B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * src0_attrs.stride_y * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(idx) * src1_attrs.stride_x);
-
- /* Compute end row address for matrix A */
- uint end_row_vec_a = uint(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) + uint(COLS_A << 1);
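-    /* COLS_A << 1 converts the number of A columns to bytes (2 bytes per F16 element) */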
-
- /* Reset accumulators */
- vec4 acc0 = vec4(0.0f);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 acc1 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 acc2 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 acc3 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) <= int(end_row_vec_a - uint(4));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 2 * 2), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(2) * src1_attrs.stride_y))
- {
- vec2 a0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec2 a1 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec2 a2 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec2 a3 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- vec4 b0 = LOAD_UNPACK4_CURRENT_ITEM_HALF(src1_ptr, src1_iter);
- vec4 b1 = LOAD_UNPACK4_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, 1));
-
- acc0 += b0 * vec4(a0.x);
- acc0 += b1 * vec4(a0.y);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * vec4(a1.x);
- acc1 += b1 * vec4(a1.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * vec4(a2.x);
- acc2 += b1 * vec4(a2.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * vec4(a3.x);
- acc3 += b1 * vec4(a3.y);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- }
-
- for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) < int(end_row_vec_a); TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 2 * 2), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y))
- {
- vec2 a0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec2 a1 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec2 a2 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec2 a3 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- vec4 b0 = LOAD_UNPACK4_CURRENT_ITEM_HALF(src1_ptr, src1_iter);
-
- acc0 += b0 * (a0.x);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b0 * (a1.x);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b0 * (a2.x);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b0 * (a3.x);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- }
-
- /* Multiply by the weight of vector-matrix product */
- acc0 = acc0 * vec4(ALPHA);
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, acc0);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-}
-#elif defined(MM_PROCESS_4X_OPTIMIZED) /* PROCESS_4X */
-TENSOR_DECLARATION(1, src0Buffer, uvec4, src0_ptr, src0_shift, 4, readonly);
-TENSOR_DECLARATION(2, src1Buffer, uvec2, src1_ptr, src1_shift, 3, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
- /* Compute the address for the vector A and matrix B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * src0_attrs.stride_y * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(idx) * src1_attrs.stride_x);
-
- /* Compute end row address for matrix A */
- uint end_row_vec_a = uint(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) + uint(COLS_A << 1);
-
- /* Reset accumulators */
- vec4 acc0 = vec4(0.0f);
-
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 acc1 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 acc2 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 acc3 = vec4(0.0f);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) <= int(end_row_vec_a - uint(16));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(8) * src0_attrs.stride_x), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
- {
- vec4 a0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
-
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 a1[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 a2[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 a3[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- vec4 b;
-
- for(int i = 0; i < 8; i++)
- {
- int j = i >> 2;
- int k = i % 4;
-
- b = LOAD_UNPACK4_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));
-
- acc0 += b * vec4(a0[j][k]);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b * vec4(a1[j][k]);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b * vec4(a2[j][k]);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b * vec4(a3[j][k]);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- }
- }
-
- for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) < int(end_row_vec_a); TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 2 * 8), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
- {
- vec4 a0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
-
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- vec4 a1[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- vec4 a2[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- vec4 a3[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-
- vec4 b;
-
- int leftover = COLS_A % 8;
-
- for(int i = 0; i < leftover; i++)
- {
- int j = i >> 2;
- int k = i % 4;
-
- b = LOAD_UNPACK4_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));
-
- acc0 += b * vec4(a0[j][k]);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- acc1 += b * vec4(a1[j][k]);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- acc2 += b * vec4(a2[j][k]);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- acc3 += b * vec4(a3[j][k]);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- }
- }
-
- /* Multiply by the weight of vector-matrix product */
- acc0 = acc0 * vec4(ALPHA);
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, acc0);
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
-#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
-#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
-}
-#elif defined(MM_PROCESS_8X) /* PROCESS_8X */
-TENSOR_DECLARATION(1, src0Buffer, uvec4, src0_ptr, src0_shift, 4, readonly);
-TENSOR_DECLARATION(2, src1Buffer, uvec4, src1_ptr, src1_shift, 4, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
- /* Compute the address for the vector A and matrix B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * src0_attrs.stride_y * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(idx) * src1_attrs.stride_x);
-
- /* Compute end row address for matrix A */
- uint end_row_vec_a = uint(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) + uint(COLS_A << 1);
-
- /* Reset accumulators */
- vec4 acc[2];
-
- acc[0] = vec4(0.0f);
- acc[1] = vec4(0.0f);
-
- for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) <= int(end_row_vec_a - uint(16));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(8) * src0_attrs.stride_x), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
- {
- vec4 a[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
- vec4 b[2];
-
- for(int i = 0; i < 8; i++)
- {
- int j = i >> 2;
- int k = i % 4;
-
- b = LOAD_UNPACK8_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));
-
- acc[0] += b[0] * vec4(a[j][k]);
- acc[1] += b[1] * vec4(a[j][k]);
- }
- }
-
- for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) < int(end_row_vec_a);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(8) * uint(2)), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
- {
- vec4 a[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
- vec4 b[2];
-
- int leftover = COLS_A % 8;
-
- for(int i = 0; i < leftover; i++)
- {
- int j = i >> 2;
- int k = i % 4;
-
- b = LOAD_UNPACK8_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));
-
- acc[0] += b[0] * vec4(a[j][k]);
- acc[1] += b[1] * vec4(a[j][k]);
- }
- }
-
- /* Multiply by the weight of vector-matrix product */
- acc[0] = acc[0] * vec4(ALPHA);
- acc[1] = acc[1] * vec4(ALPHA);
-
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, acc);
-}
-#endif /* PROCESS_8X */
-#endif /* GEMM_MM_FLOATING_POINT */
-
-#ifdef GEMM_ACCUMULATE_BIASES
-#if defined(ACCUM_PROCESS_4X)
-/** This kernel accumulates each row with the biases vector
- *
- * @param[in, out] accum_ptr Pointer to the accumulate tensor. Supported data type: F16
- * @param[in] accum_attrs The attributes of the accumulate tensor
- * @param[in]      biases_ptr   Pointer to the biases vector. Supported data types: same as @p accum_ptr
- * @param[in] biases_attrs The attributes of the biases tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes accum_attrs;
- VectorAttributes biases_attrs;
-};
-
-TENSOR_DECLARATION(1, accumBuffer, uvec2, accum_ptr, accum_shift, 3, restrict);
-TENSOR_DECLARATION(2, biasesBuffer, uvec2, biases_ptr, biases_shift, 3, readonly);
-
-void main(void)
-{
- ImageIterator accum_iter = CONVERT_TO_IMAGE_ITERATOR(accum_attrs, accum_shift);
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR(biases_attrs, biases_shift);
-
- vec4 u[2];
- u[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(accum_ptr, accum_iter);
- u[1] = LOAD_UNPACK4_CURRENT_ITEM_HALF(biases_ptr, biases_iter);
-
- vec4 tmp;
- tmp = u[0] + u[1];
- STORE_PACK4_CURRENT_ITEM_HALF(accum_ptr, accum_iter, tmp);
-}
-#elif defined(ACCUM_PROCESS_8X) /* ACCUM_PROCESS_8X */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes accum_attrs;
- VectorAttributes biases_attrs;
-};
-
-TENSOR_DECLARATION(1, accumBuffer, uvec4, accum_ptr, accum_shift, 4, restrict);
-TENSOR_DECLARATION(2, biasesBuffer, uvec4, biases_ptr, biases_shift, 4, readonly);
-
-void main(void)
-{
- ImageIterator accum_iter = CONVERT_TO_IMAGE_ITERATOR(accum_attrs, accum_shift);
- VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR(biases_attrs, biases_shift);
-
- vec4 u[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(accum_ptr, accum_iter);
- vec4 v[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(biases_ptr, biases_iter);
-
- vec4 r[2];
- r[0] = u[0] + v[0];
- r[1] = u[1] + v[1];
- STORE_PACK8_CURRENT_ITEM_HALF(accum_ptr, accum_iter, r);
-}
-#endif /* ACCUM_PROCESS_8X */
-#endif /* GEMM_ACCUMULATE_BIASES */
-
-#ifdef GEMM_MM_INTERLEAVED_TRANSPOSED
-/** This OpenGL ES kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
- * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x8 before running the matrix multiplication
- *
- * @note The optional value of scalar alpha is passed at compile time using -DALPHA=alpha
- *
- * @param[in] src0_ptr Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_attrs The attributes of the source matrix
- * @param[in] src1_ptr Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_attrs The attributes of the source matrix
- * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src0_attrs;
- ImageAttributes src1_attrs;
- ImageAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, src0Buffer, uvec2, src0_ptr, src0_shift, 3, readonly);
-TENSOR_DECLARATION(2, src1Buffer, uvec4, src1_ptr, src1_shift, 4, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main()
-{
- ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
- ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- /* Compute address for matrix A and B */
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y));
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(gl_GlobalInvocationID.x) * (src1_attrs.stride_y));
- /* Compute end row address for matrix B */
- int end_row_mtx_b = (int(CURRENT_ITEM_OFFSET_IN_BYTES(src1_iter)) >> 1) + int(COLS_B);
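-    /* The >> 1 converts a byte offset to an F16 element count (2 bytes per element) */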
-
- /* Reset accumulators */
- vec4 c00[2];
- vec4 c10[2];
- vec4 c20[2];
- vec4 c30[2];
- c00[0] = vec4(0.0f);
- c00[1] = vec4(0.0f);
- c10[0] = vec4(0.0f);
- c10[1] = vec4(0.0f);
- c20[0] = vec4(0.0f);
- c20[1] = vec4(0.0f);
- c30[0] = vec4(0.0f);
- c30[1] = vec4(0.0f);
-
- // FIXME: loop unrolling really needed for GLES?
- for(; (int(CURRENT_ITEM_OFFSET_IN_BYTES(src1_iter)) >> 1) <= (end_row_mtx_b - 16); TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 16), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, 32))
- {
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
- vec4 a0 = LOAD_UNPACK4_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
- vec4 b0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src1_ptr, src1_iter);
-
- c00[0] += vec4(a0.x) * b0[0];
- c00[1] += vec4(a0.x) * b0[1];
- c10[0] += vec4(a0.y) * b0[0];
- c10[1] += vec4(a0.y) * b0[1];
- c20[0] += vec4(a0.z) * b0[0];
- c20[1] += vec4(a0.z) * b0[1];
- c30[0] += vec4(a0.w) * b0[0];
- c30[1] += vec4(a0.w) * b0[1];
-
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
- a0 = LOAD_UNPACK4_HALF(src0_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src0_iter, 8));
- b0 = LOAD_UNPACK8_HALF(src1_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src1_iter, 16));
-
- c00[0] += vec4(a0.x) * b0[0];
- c00[1] += vec4(a0.x) * b0[1];
- c10[0] += vec4(a0.y) * b0[0];
- c10[1] += vec4(a0.y) * b0[1];
- c20[0] += vec4(a0.z) * b0[0];
- c20[1] += vec4(a0.z) * b0[1];
- c30[0] += vec4(a0.w) * b0[0];
- c30[1] += vec4(a0.w) * b0[1];
- }
-
- for(; (int(CURRENT_ITEM_OFFSET_IN_BYTES(src1_iter)) >> 1) < end_row_mtx_b; TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 8), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, 16))
- {
- /* Load values from matrix A (interleaved) and matrix B (transposed) */
- vec4 a0 = LOAD_UNPACK4_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
- vec4 b0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src1_ptr, src1_iter);
-
- c00[0] += vec4(a0.x) * b0[0];
- c00[1] += vec4(a0.x) * b0[1];
- c10[0] += vec4(a0.y) * b0[0];
- c10[1] += vec4(a0.y) * b0[1];
- c20[0] += vec4(a0.z) * b0[0];
- c20[1] += vec4(a0.z) * b0[1];
- c30[0] += vec4(a0.w) * b0[0];
- c30[1] += vec4(a0.w) * b0[1];
- }
-
- /* Multiply by the weight of matrix product */
- c00[0] = c00[0] * vec4(ALPHA);
- c00[1] = c00[1] * vec4(ALPHA);
- c10[0] = c10[0] * vec4(ALPHA);
- c10[1] = c10[1] * vec4(ALPHA);
- c20[0] = c20[0] * vec4(ALPHA);
- c20[1] = c20[1] * vec4(ALPHA);
- c30[0] = c30[0] * vec4(ALPHA);
- c30[1] = c30[1] * vec4(ALPHA);
-
- /* Store 4x8 block */
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), c00);
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), c10);
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), c20);
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), c30);
-}
-#endif /* GEMM_MM_INTERLEAVED_TRANSPOSED */
-#else /* DATA_TYPE_FP16 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP32 */
diff --git a/src/core/GLES_COMPUTE/cs_shaders/helpers_cs.h b/src/core/GLES_COMPUTE/cs_shaders/helpers_cs.h
deleted file mode 100644
index 4e3551700f..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/helpers_cs.h
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef ARM_COMPUTE_HELPER_CS_H
-#define ARM_COMPUTE_HELPER_CS_H
-
-#define SHADER_PARAMS_DECLARATION \
- layout(std140, binding = 0) uniform shader_params
-
-#define TENSOR_DECLARATION(location, buffer_type, type, ptr_name, shift_name, element_shift, access) \
- layout(std430, binding = location) access buffer buffer_type \
- { \
- type ptr_name[]; \
- }; \
- const uint shift_name = uint(element_shift)
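-
-/* For example, TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly) expands to:
- *
- *     layout(std430, binding = 1) readonly buffer srcBuffer
- *     {
- *         float src_ptr[];
- *     };
- *     const uint src_shift = uint(2);
- *
- * where the element shift of 2 encodes the 4-byte (1 << 2) size of one buffer element.
- */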
-
-struct VectorAttributes
-{
- uint stride_x; /**< Stride of the vector in X dimension (in bytes) */
- uint step_x; /**< stride_x * number of elements along X processed per workitem (in bytes) */
- uint offset_first_element_in_bytes; /**< The offset of the first element in the vector (in bytes) */
-    uint padding;                       /**< Padding to round the structure size up to a multiple of a vec4 */
-};
-
-struct ImageAttributes
-{
- uint stride_x; /**< Stride of the image in X dimension (in bytes) */
- uint step_x; /**< stride_x * number of elements along X processed per workitem (in bytes) */
- uint stride_y; /**< Stride of the image in Y dimension (in bytes) */
- uint step_y; /**< stride_y * number of elements along Y processed per workitem (in bytes) */
- uint offset_first_element_in_bytes; /**< The offset of the first element in the image (in bytes) */
-    uint padding1;                      /**< Padding to round the structure size up to a multiple of a vec4 */
-    uint padding2;                      /**< Padding to round the structure size up to a multiple of a vec4 */
-    uint padding3;                      /**< Padding to round the structure size up to a multiple of a vec4 */
-};
-
-struct Tensor3DAttributes
-{
- uint stride_x; /**< Stride of the tensor in X dimension (in bytes) */
- uint step_x; /**< stride_x * number of elements along X processed per workitem (in bytes) */
- uint stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
- uint step_y; /**< stride_y * number of elements along Y processed per workitem (in bytes) */
- uint stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
- uint step_z; /**< stride_z * number of elements along Z processed per workitem (in bytes) */
- uint offset_first_element_in_bytes; /**< The offset of the first element in the tensor (in bytes) */
-    uint padding;                       /**< Padding to round the structure size up to a multiple of a vec4 */
-};
-
-struct VectorIterator
-{
- int current_offset_in_bytes; /**< Current offset of vector (in bytes) */
- int stride_x; /**< Stride of the vector in X dimension (in bytes) */
- int element_shift; /**< The number of bits to shift by for one element */
-};
-
-struct ImageIterator
-{
- int current_offset_in_bytes; /**< Current offset of image (in bytes) */
- int stride_x; /**< Stride of the image in X dimension (in bytes) */
- int stride_y; /**< Stride of the image in Y dimension (in bytes) */
- int element_shift; /**< The number of bits to shift by for one element */
-};
-
-struct Tensor3DIterator
-{
- int current_offset_in_bytes; /**< Current offset of tensor (in bytes) */
- int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
- int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
- int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
- int element_shift; /**< The number of bits to shift by for one element */
-};
-
-#define CONVERT_TO_VECTOR_ITERATOR(attrs, element_shift) \
- update_vector_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, attrs.step_x)
-
-#define CONVERT_TO_VECTOR_ITERATOR_NO_STEP(attrs, element_shift) \
- update_vector_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, uint(0))
-
-#define CONVERT_TO_IMAGE_ITERATOR(attrs, element_shift) \
- update_image_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, attrs.step_x, attrs.stride_y, attrs.step_y)
-
-#define CONVERT_TO_IMAGE_ITERATOR_NO_STEP(attrs, element_shift) \
- update_image_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, uint(0), attrs.stride_y, uint(0))
-
-#define CONVERT_TO_TENSOR3D_ITERATOR(attrs, element_shift) \
- update_tensor3D_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, attrs.step_x, attrs.stride_y, attrs.step_y, attrs.stride_z, attrs.step_z)
-
-#define CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(attrs, element_shift) \
- update_tensor3D_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, uint(0), attrs.stride_y, uint(0), attrs.stride_z, uint(0))
-
-#define CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(attrs, element_shift) \
- update_image_from_tensor3D_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, attrs.step_x, attrs.stride_y, attrs.step_y, attrs.stride_z, attrs.step_z)
-
-#define CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(attrs, element_shift) \
- update_image_from_tensor3D_iter_offset(element_shift, attrs.offset_first_element_in_bytes, \
- attrs.stride_x, uint(0), attrs.stride_y, uint(0), attrs.stride_z, attrs.step_z)
-
-/** Wrap vector information into a VectorIterator structure, and make the offset to be this workitem's position.
- *
- * @param[in] element_shift The number of bits to shift by for one element
- * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
- * @param[in] stride_x Stride of the vector in X dimension (in bytes)
- * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
- *
- * @return A VectorIterator object
- */
-VectorIterator update_vector_iter_offset(uint element_shift, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
-{
- VectorIterator vector_iter;
- vector_iter.element_shift = int(element_shift);
- vector_iter.stride_x = int(stride_x);
- vector_iter.current_offset_in_bytes = int(offset_first_element_in_bytes + gl_GlobalInvocationID.x * step_x);
-
- return vector_iter;
-}
-
-/** Wrap image information into an ImageIterator structure, and make the offset to be this workitem's position.
- *
- * @param[in] element_shift The number of bits to shift by for one element
- * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
- * @param[in] stride_x Stride of the image in X dimension (in bytes)
- * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] stride_y Stride of the image in Y dimension (in bytes)
- * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
- *
- * @return An ImageIterator object
- */
-ImageIterator update_image_iter_offset(uint element_shift, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
-{
- ImageIterator image_iter;
- image_iter.element_shift = int(element_shift);
- image_iter.stride_x = int(stride_x);
- image_iter.stride_y = int(stride_y);
- image_iter.current_offset_in_bytes = int(offset_first_element_in_bytes + gl_GlobalInvocationID.x * step_x + gl_GlobalInvocationID.y * step_y);
-
- return image_iter;
-}
-
-/** Wrap 3D tensor information into a Tensor3DIterator structure, and make the offset to be this workitem's position.
- *
- * @param[in] element_shift The number of bits to shift by for one element
- * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] stride_x                       Stride of the tensor in X dimension (in bytes)
- * @param[in] step_x                         stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] stride_y                       Stride of the tensor in Y dimension (in bytes)
- * @param[in] step_y                         stride_y * number of elements along Y processed per workitem (in bytes)
- * @param[in] stride_z                       Stride of the tensor in Z dimension (in bytes)
- * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
- *
- * @return A 3D Tensor3DIterator object
- */
-Tensor3DIterator update_tensor3D_iter_offset(uint element_shift, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
-{
- Tensor3DIterator tensor_iter;
- tensor_iter.element_shift = int(element_shift);
- tensor_iter.stride_x = int(stride_x);
- tensor_iter.stride_y = int(stride_y);
- tensor_iter.stride_z = int(stride_z);
- tensor_iter.current_offset_in_bytes = int(offset_first_element_in_bytes + gl_GlobalInvocationID.x * step_x + gl_GlobalInvocationID.y * step_y + gl_GlobalInvocationID.z * step_z);
-
- return tensor_iter;
-}
-
-/** Wrap 3D tensor information into an ImageIterator structure, and make the offset to be this workitem's position.
- *
- * @param[in] element_shift The number of bits to shift by for one element
- * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in] stride_x Stride of the tensor in X dimension (in bytes)
- * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
- * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
- * @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
- * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
- *
- * @return An ImageIterator object
- */
-ImageIterator update_image_from_tensor3D_iter_offset(uint element_shift, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
-{
- ImageIterator image_iter;
- image_iter.element_shift = int(element_shift);
- image_iter.stride_x = int(stride_x);
- image_iter.stride_y = int(stride_y);
- image_iter.current_offset_in_bytes = int(offset_first_element_in_bytes + gl_GlobalInvocationID.x * step_x + gl_GlobalInvocationID.y * step_y + gl_GlobalInvocationID.z * step_z);
-
- return image_iter;
-}
-
-#define VECTOR_OFFSET(tensor_iter, x) \
- uint(vector_offset_in_bytes(tensor_iter, int(x)) >> tensor_iter.element_shift)
-
-#define IMAGE_OFFSET(tensor_iter, x, y) \
- uint(image_offset_in_bytes(tensor_iter, int(x), int(y)) >> tensor_iter.element_shift)
-
-#define TENSOR3D_OFFSET(tensor_iter, x, y, z) \
- uint(tensor3D_offset_in_bytes(tensor_iter, int(x), int(y), int(z)) >> tensor_iter.element_shift)
-
-#define TENSOR_OFFSET_ADVANCE(tensor_iter, n) \
- uint((tensor_iter.current_offset_in_bytes >> tensor_iter.element_shift) + int(n))
-
-#define TENSOR_OFFSET_ADVANCE_IN_BYTES(tensor_iter, n) \
- uint((tensor_iter.current_offset_in_bytes + int(n)) >> tensor_iter.element_shift)
-
-#define CURRENT_ITEM_OFFSET(tensor_iter) \
- uint(tensor_iter.current_offset_in_bytes >> tensor_iter.element_shift)
-
-#define CURRENT_ITEM_OFFSET_IN_BYTES(tensor_iter) \
- uint(tensor_iter.current_offset_in_bytes)
-
-#define TENSOR_ITERATOR_ADVANCE(tensor_iter, n) \
- tensor_iter.current_offset_in_bytes += (int(n) << tensor_iter.element_shift)
-
-#define TENSOR_ITERATOR_ADVANCE_IN_BYTES(tensor_iter, n) \
- tensor_iter.current_offset_in_bytes += int(n)
-
-#define SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(tensor_iter, n) \
- tensor_iter.current_offset_in_bytes = int(n)
-
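-/* Note: iterators track a byte offset; the *_OFFSET macros above shift it right by element_shift
- * to convert it to an element index into the typed buffer. For example, with element_shift = 2
- * (4-byte F32 elements), a byte offset of 64 maps to element 16.
- */
-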
-/** Get the offset of a VectorIterator
- *
- * @param[in] vector_iter The VectorIterator object pointing to the starting position of the buffer
- * @param[in] x Relative X position
- *
- * @return The relative offset of the VectorIterator object (in bytes)
- */
-uint vector_offset_in_bytes(VectorIterator vector_iter, int x)
-{
- return uint(vector_iter.current_offset_in_bytes + x * vector_iter.stride_x);
-}
-
-/** Get the offset of an ImageIterator
- *
- * @param[in] image_iter  The ImageIterator object pointing to the starting position of the buffer
- * @param[in] x Relative X position
- * @param[in] y Relative Y position
- *
- * @return The relative offset of the ImageIterator object (in bytes)
- */
-uint image_offset_in_bytes(ImageIterator image_iter, int x, int y)
-{
- return uint(image_iter.current_offset_in_bytes + x * image_iter.stride_x + y * image_iter.stride_y);
-}
-
-/** Get the offset of a Tensor3DIterator
- *
- * @param[in] tensor_iter The Tensor3DIterator object pointing to the starting position of the buffer
- * @param[in] x Relative X position
- * @param[in] y Relative Y position
- * @param[in] z Relative Z position
- *
- * @return The relative offset of the Tensor3DIterator object (in bytes)
- */
-uint tensor3D_offset_in_bytes(Tensor3DIterator tensor_iter, int x, int y, int z)
-{
- return uint(tensor_iter.current_offset_in_bytes + x * tensor_iter.stride_x + y * tensor_iter.stride_y + z * tensor_iter.stride_z);
-}
-
-#define LOAD(tensor_ptr, offset) tensor_ptr[offset]
-#define STORE(tensor_ptr, offset, data) tensor_ptr[offset] = data
-#define LOAD_CURRENT_ITEM(tensor_ptr, tensor_iter) tensor_ptr[CURRENT_ITEM_OFFSET(tensor_iter)]
-#define STORE_CURRENT_ITEM(tensor_ptr, tensor_iter, data) tensor_ptr[CURRENT_ITEM_OFFSET(tensor_iter)] = data
-
-#define VLOAD2(return_type, tensor_ptr, offset) \
- return_type(LOAD(tensor_ptr, offset), \
- LOAD(tensor_ptr, (offset) + uint(1)))
-
-#define VSTORE2(tensor_ptr, offset, data) \
- STORE(tensor_ptr, offset, data[0]); \
- STORE(tensor_ptr, (offset) + uint(1), data[1])
-
-#define VLOAD2_CURRENT_ITEM(return_type, tensor_ptr, tensor_iter) VLOAD2(return_type, tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE2_CURRENT_ITEM(tensor_ptr, tensor_iter, data) VSTORE2(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD3(return_type, tensor_ptr, offset) \
- return_type(LOAD(tensor_ptr, offset), \
- LOAD(tensor_ptr, (offset) + uint(1)), \
- LOAD(tensor_ptr, (offset) + uint(2)))
-
-#define VSTORE3(tensor_ptr, offset, data) \
- STORE(tensor_ptr, offset, data[0]); \
- STORE(tensor_ptr, (offset) + uint(1), data[1]); \
- STORE(tensor_ptr, (offset) + uint(2), data[2])
-
-#define VLOAD3_CURRENT_ITEM(return_type, tensor_ptr, tensor_iter) VLOAD3(return_type, tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE3_CURRENT_ITEM(tensor_ptr, tensor_iter, data) VSTORE3(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD4(return_type, tensor_ptr, offset) \
- return_type(LOAD(tensor_ptr, offset), \
- LOAD(tensor_ptr, (offset) + uint(1)), \
- LOAD(tensor_ptr, (offset) + uint(2)), \
- LOAD(tensor_ptr, (offset) + uint(3)))
-
-#define VSTORE4(tensor_ptr, offset, data) \
- STORE(tensor_ptr, offset, data[0]); \
- STORE(tensor_ptr, (offset) + uint(1), data[1]); \
- STORE(tensor_ptr, (offset) + uint(2), data[2]); \
- STORE(tensor_ptr, (offset) + uint(3), data[3])
-
-#define VLOAD4_CURRENT_ITEM(return_type, tensor_ptr, tensor_iter) VLOAD4(return_type, tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE4_CURRENT_ITEM(tensor_ptr, tensor_iter, data) VSTORE4(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD5(return_type, tensor_ptr, offset) \
- return_type(LOAD(tensor_ptr, offset), \
- LOAD(tensor_ptr, (offset) + uint(1)), \
- LOAD(tensor_ptr, (offset) + uint(2)), \
- LOAD(tensor_ptr, (offset) + uint(3)), \
- LOAD(tensor_ptr, (offset) + uint(4)))
-
-#define VSTORE5(tensor_ptr, offset, data) \
- STORE(tensor_ptr, offset, data[0]); \
- STORE(tensor_ptr, (offset) + uint(1), data[1]); \
- STORE(tensor_ptr, (offset) + uint(2), data[2]); \
- STORE(tensor_ptr, (offset) + uint(3), data[3]); \
- STORE(tensor_ptr, (offset) + uint(4), data[4])
-
-#define VLOAD5_CURRENT_ITEM(return_type, tensor_ptr, tensor_iter) VLOAD5(return_type, tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE5_CURRENT_ITEM(tensor_ptr, tensor_iter, data) VSTORE5(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-/** Converting the vec4 object to 4 half-precision (16-bit) floating point values and packing into a uvec2 object
- *
- * @param[in] data The vec4 object to be packed
- *
- * @return The packed uvec2 object
- */
-highp uvec2 pack4_half(mediump vec4 data)
-{
- return uvec2(packHalf2x16(data.xy), packHalf2x16(data.zw));
-}
-
-/** Unpacking the uvec2 object to 4 half-precision (16-bit) floating point values and converting to a vec4 object
- *
- * @param[in] packed_data The uvec2 object to be unpacked
- *
- * @return The unpacked vec4 object
- */
-mediump vec4 unpack4_half(highp uvec2 packed_data)
-{
- return vec4(unpackHalf2x16(packed_data.x), unpackHalf2x16(packed_data.y));
-}
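-
-// Illustrative only: pack4_half and unpack4_half are inverses for values
-// exactly representable in FP16, e.g.:
-//   highp uvec2 p = pack4_half(vec4(1.0, 2.0, 3.0, 4.0));
-//   mediump vec4 v = unpack4_half(p); // v == vec4(1.0, 2.0, 3.0, 4.0)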
-
-/** Unpacking the uvec3 object to 6 half-precision (16-bit) floating point values and converting to a vec2[3] object
- *
- * @param[in] packed_data The uvec3 object to be unpacked
- *
- * @return The unpacked vec2[3] object
- */
-mediump vec2[3] unpack6_half(highp uvec3 packed_data)
-{
- return vec2[3](unpackHalf2x16(packed_data[0]),
- unpackHalf2x16(packed_data[1]),
- unpackHalf2x16(packed_data[2]));
-}
-
-/** Converting the vec4[2] object to 8 half-precision (16-bit) floating point values and packing into a uvec4 object
- *
- * @param[in] data The vec4[2] object to be packed
- *
- * @return The packed uvec4 object
- */
-highp uvec4 pack8_half(mediump vec4 data[2])
-{
- return uvec4(packHalf2x16(data[0].xy), packHalf2x16(data[0].zw),
- packHalf2x16(data[1].xy), packHalf2x16(data[1].zw));
-}
-
-/** Unpacking the uvec4 object to 8 half-precision (16-bit) floating point values and converting to a vec4[2] object
- *
- * @param[in] packed_data The uvec4 object to be unpacked
- *
- * @return The unpacked vec4[2] object
- */
-mediump vec4[2] unpack8_half(highp uvec4 packed_data)
-{
- return vec4[2](vec4(unpackHalf2x16(packed_data.x), unpackHalf2x16(packed_data.y)),
- vec4(unpackHalf2x16(packed_data.z), unpackHalf2x16(packed_data.w)));
-}
-
-/** Unpacking the uvec2[3] object to 12 half-precision (16-bit) floating point values and converting to a vec4[3] object
- *
- * @param[in] packed_data The uvec2[3] object to be unpacked
- *
- * @return The unpacked vec4[3] object
- */
-mediump vec4[3] unpack12_half(highp uvec2[3] packed_data)
-{
- return vec4[3](vec4(unpackHalf2x16(packed_data[0].x), unpackHalf2x16(packed_data[0].y)),
- vec4(unpackHalf2x16(packed_data[1].x), unpackHalf2x16(packed_data[1].y)),
- vec4(unpackHalf2x16(packed_data[2].x), unpackHalf2x16(packed_data[2].y)));
-}
-
-// For half-precision (16-bits) floating point packed into a "uint" element
-#define LOAD_UNPACK2_HALF(tensor_ptr, offset) unpackHalf2x16(uint(LOAD(tensor_ptr, offset)))
-#define STORE_PACK2_HALF(tensor_ptr, offset, data) STORE(tensor_ptr, offset, packHalf2x16(data))
-#define LOAD_UNPACK2_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) LOAD_UNPACK2_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define STORE_PACK2_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter, data) STORE_PACK2_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD2_UNPACK4_HALF(tensor_ptr, offset) unpack4_half(VLOAD2(uvec2, tensor_ptr, offset))
-#define VSTORE2_PACK4_HALF(tensor_ptr, offset, data) VSTORE2(tensor_ptr, offset, pack4_half(data))
-#define VLOAD2_UNPACK4_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) VLOAD2_UNPACK4_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE2_PACK4_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter, data) VSTORE2_PACK4_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD3_UNPACK6_HALF(tensor_ptr, offset) unpack6_half(VLOAD3(uvec3, tensor_ptr, offset))
-#define VLOAD3_UNPACK6_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) VLOAD3_UNPACK6_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-
-#define VLOAD4_UNPACK8_HALF(tensor_ptr, offset) unpack8_half(VLOAD4(uvec4, tensor_ptr, offset))
-#define VSTORE4_PACK8_HALF(tensor_ptr, offset, data) VSTORE4(tensor_ptr, offset, pack8_half(data))
-#define VLOAD4_UNPACK8_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) VLOAD4_UNPACK8_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE4_PACK8_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter, data) VSTORE4_PACK8_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-// For half-precision (16-bits) floating point packed into a "uvec2" element
-#define LOAD_UNPACK4_HALF(tensor_ptr, offset) unpack4_half(uvec2(LOAD(tensor_ptr, offset)))
-#define STORE_PACK4_HALF(tensor_ptr, offset, data) STORE(tensor_ptr, offset, pack4_half(data))
-#define LOAD_UNPACK4_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) LOAD_UNPACK4_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define STORE_PACK4_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter, data) STORE_PACK4_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD2_UNPACK8_HALF(tensor_ptr, offset) unpack8_half(VLOAD2(uvec4, tensor_ptr, offset))
-#define VSTORE2_PACK8_HALF(tensor_ptr, offset, data) VSTORE2(tensor_ptr, offset, pack8_half(data))
-#define VLOAD2_UNPACK8_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) VLOAD2_UNPACK8_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define VSTORE2_PACK8_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter, data) VSTORE2_PACK8_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#define VLOAD3_UNPACK12_HALF(tensor_ptr, offset) unpack12_half(VLOAD3(uvec2[3], tensor_ptr, offset))
-#define VLOAD3_UNPACK12_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) VLOAD3_UNPACK12_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-
-// For half-precision (16-bits) floating point packed into a "uvec4" element
-#define LOAD_UNPACK8_HALF(tensor_ptr, offset) unpack8_half(uvec4(LOAD(tensor_ptr, offset)))
-#define STORE_PACK8_HALF(tensor_ptr, offset, data) STORE(tensor_ptr, offset, pack8_half(data))
-#define LOAD_UNPACK8_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter) LOAD_UNPACK8_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define STORE_PACK8_CURRENT_ITEM_HALF(tensor_ptr, tensor_iter, data) STORE_PACK8_HALF(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
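-
-// Illustrative only: the three families above differ only in the element type
-// of the backing buffer. For instance, with a buffer declared as
-// "TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly)",
-// a single LOAD_UNPACK4_HALF yields 4 FP16 values as one vec4, as done in
-// normalize_planar_yuv_layer.cs.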
-
-/** Converting the uvec4 object to 4 low-precision uint values and packing into a uint object
- *
- * @param[in] data The uvec4 object to be packed
- *
- * @return The packed uint object
- */
-highp uint pack4_u8(lowp uvec4 data)
-{
- highp uint r = uint(0);
-
- for(int i = 0; i < 4; i++)
- {
- r |= data[i] << uint(i * 8);
- }
-
- return r;
-}
-
-/** Unpacking the uint object to 4 low-precision uint values and converting to a uvec4 object
- *
- * @param[in] packed_data The uint object to be unpacked
- *
- * @return The unpacked uvec4 object
- */
-lowp uvec4 unpack4_u8(highp uint packed_data)
-{
-    lowp uvec4 unpacked;
-
-    for(int i = 0; i < 4; i++)
-    {
-        unpacked[i] = (packed_data >> uint(i * 8)) & uint(0xFF);
-    }
-
-    return unpacked;
-}
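-
-// Illustrative only: the lowest component occupies the least significant
-// byte, e.g.:
-//   pack4_u8(uvec4(0x01, 0x02, 0x03, 0x04)) == uint(0x04030201)
-//   unpack4_u8(uint(0x04030201))            == uvec4(0x01, 0x02, 0x03, 0x04)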
-
-#define LOAD_UNPACK4_U8(tensor_ptr, offset) unpack4_u8(uint(LOAD(tensor_ptr, offset)))
-#define STORE_PACK4_U8(tensor_ptr, offset, data) STORE(tensor_ptr, offset, pack4_u8(data))
-#define LOAD_UNPACK4_CURRENT_ITEM_U8(tensor_ptr, tensor_iter) LOAD_UNPACK4_U8(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter))
-#define STORE_PACK4_CURRENT_ITEM_U8(tensor_ptr, tensor_iter, data) STORE_PACK4_U8(tensor_ptr, CURRENT_ITEM_OFFSET(tensor_iter), data)
-
-#endif // ARM_COMPUTE_HELPER_CS_H
diff --git a/src/core/GLES_COMPUTE/cs_shaders/normalization_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/normalization_layer.cs
deleted file mode 100644
index a5ec68c0c5..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/normalization_layer.cs
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-/** Apply cross-map or in-map (1D) normalization, selected at compile time with "#define CROSS_MAP" or "#define IN_MAP_1D"
- *
- * @note The alpha parameter divided by norm_size should be given as a preprocessor argument using "#define COEFF x"
- * @note BETA parameter in the normalization equation should be given as a preprocessor argument using "#define BETA x"
- * @note KAPPA parameter in the normalization equation should be given as a preprocessor argument using "#define KAPPA x"
- * @note Number of elements on the right or left side to normalize across should be given as a preprocessor argument using "#define RADIUS x"
- *
- * @param[in] src1_ptr Pointer to the first source tensor. Supported data types: F32
- * @param[in] src1_attrs The attributes of the first source tensor
- * @param[in] src2_ptr Pointer to the second source tensor. Supported data types: Same as @p src1_ptr
- * @param[in] src2_attrs The attributes of the second source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: Same as @p src1_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src1_attrs;
- Tensor3DAttributes src2_attrs;
- Tensor3DAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
-TENSOR_DECLARATION(2, src2Buffer, float, src2_ptr, src2_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
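-// Illustrative only: a hypothetical build-time configuration for a 5-wide
-// cross-map LRN with alpha = 1e-4, beta = 0.75 and kappa = 1.0 would be:
-//   #define CROSS_MAP
-//   #define COEFF 0.00002 // alpha / norm_size = 1e-4 / 5
-//   #define BETA 0.75
-//   #define KAPPA 1.0
-//   #define RADIUS 2      // (norm_size - 1) / 2
-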
-#ifdef CROSS_MAP
-void main(void)
-{
- Tensor3DIterator src1_iter = CONVERT_TO_TENSOR3D_ITERATOR(src1_attrs, src1_shift);
- Tensor3DIterator src2_iter = CONVERT_TO_TENSOR3D_ITERATOR(src2_attrs, src2_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float acc = 0.0;
-
- int num_of_slices = int(gl_NumWorkGroups.z * gl_WorkGroupSize.z);
- int current_slice = int(gl_GlobalInvocationID.z);
-
- int left_slice = max(current_slice - int(RADIUS), int(0));
- int right_slice = min(current_slice + int(RADIUS), int(num_of_slices - 1));
-
- for(int i = left_slice; i <= right_slice; i++)
- {
- acc += LOAD(src2_ptr, TENSOR3D_OFFSET(src2_iter, 0, 0, i - current_slice));
- }
-
- float normalized = pow(float(KAPPA) + float(COEFF) * acc, float(BETA));
-
- float normalized_pixel = (LOAD_CURRENT_ITEM(src1_ptr, src1_iter)) / normalized;
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, normalized_pixel);
-}
-
-#elif defined(IN_MAP_1D)
-void main(void)
-{
- Tensor3DIterator src1_iter = CONVERT_TO_TENSOR3D_ITERATOR(src1_attrs, src1_shift);
- Tensor3DIterator src2_iter = CONVERT_TO_TENSOR3D_ITERATOR(src2_attrs, src2_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float acc = 0.0;
-
- int num_of_items_x = int(gl_NumWorkGroups.x * gl_WorkGroupSize.x);
- int current_pos = int(gl_GlobalInvocationID.x);
-
- int left_pos = max(current_pos - int(RADIUS), int(0));
- int right_pos = min(current_pos + int(RADIUS), int(num_of_items_x - 1));
-
- for(int i = left_pos; i <= right_pos; i++)
- {
- acc += LOAD(src2_ptr, TENSOR3D_OFFSET(src2_iter, i - current_pos, 0, 0));
- }
-
- float normalized = pow(float(KAPPA) + float(COEFF) * acc, float(BETA));
-
- float normalized_pixel = (LOAD_CURRENT_ITEM(src1_ptr, src1_iter)) / normalized;
-
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, normalized_pixel);
-}
-#endif /*CROSS_MAP*/
diff --git a/src/core/GLES_COMPUTE/cs_shaders/normalize_planar_yuv_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/normalize_planar_yuv_layer.cs
deleted file mode 100644
index 6a46845d79..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/normalize_planar_yuv_layer.cs
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-precision mediump float;
-
-/** Apply the normalize_planar_yuv layer, i.e. out = (in - mean) / sd, with mean and sd indexed per channel.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p src_ptr
- * @param[in] mean_attrs The attributes of the mean tensor
- * @param[in] sd_ptr Pointer to the standard deviation (sd) values tensor. Supported data types: same as @p src_ptr
- * @param[in] sd_attrs The attributes of the sd tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- VectorAttributes mean_attrs;
- VectorAttributes sd_attrs;
-};
-
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-TENSOR_DECLARATION(3, meanBuffer, uvec2, mean_ptr, mean_shift, 3, readonly);
-TENSOR_DECLARATION(4, sdBuffer, uvec2, sd_ptr, sd_shift, 3, readonly);
-
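-// Note: mean and sd are FP16 tensors, so each uvec2 element packs 4 values.
-// Advancing by current_slice * stride_x bytes selects the uvec2 covering this
-// channel, and the (current_slice % 4) branches below pick the matching
-// component of the unpacked vec4.
-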
-void main(void)
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
- VectorIterator mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
- VectorIterator sd_iter = CONVERT_TO_VECTOR_ITERATOR(sd_attrs, sd_shift);
-
- vec4 unpacked_s[3];
- vec4 tmp;
- vec4 result;
-
- uint current_slice = gl_GlobalInvocationID.z;
- unpacked_s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
- unpacked_s[1] = LOAD_UNPACK4_HALF(mean_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(mean_iter, current_slice * mean_attrs.stride_x));
- unpacked_s[2] = LOAD_UNPACK4_HALF(sd_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(sd_iter, current_slice * sd_attrs.stride_x));
-
- if((current_slice % uint(4)) == uint(0))
- {
- tmp = unpacked_s[0] - unpacked_s[1].x;
- result = tmp / unpacked_s[2].x;
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
- }
- else if((current_slice % uint(4)) == uint(1))
- {
- tmp = unpacked_s[0] - unpacked_s[1].y;
- result = tmp / unpacked_s[2].y;
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
- }
- else if((current_slice % uint(4)) == uint(2))
- {
- tmp = unpacked_s[0] - unpacked_s[1].z;
- result = tmp / unpacked_s[2].z;
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
- }
- else
- {
- tmp = unpacked_s[0] - unpacked_s[1].w;
- result = tmp / unpacked_s[2].w;
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
- }
-}
diff --git a/src/core/GLES_COMPUTE/cs_shaders/pixelwise_mul_float.cs b/src/core/GLES_COMPUTE/cs_shaders/pixelwise_mul_float.cs
deleted file mode 100644
index 936839f97e..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/pixelwise_mul_float.cs
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-/** Performs a pixelwise multiplication of two float inputs, scaled by a compile-time float factor.
- *
- * @param[in] src1_ptr Pointer to the first source tensor. Supported data types: F32
- * @param[in] src1_attrs The attributes of the first source tensor
- * @param[in] src2_ptr Pointer to the second source tensor. Supported data types: Same as @p src1_ptr
- * @param[in] src2_attrs The attributes of the second source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: Same as @p src1_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- *
- * @note The scaling factor must be passed at compile time using "#define SCALE x". e.g. "#define SCALE 0.5"
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src1_attrs;
- Tensor3DAttributes src2_attrs;
- Tensor3DAttributes dst_attrs;
-};
-TENSOR_DECLARATION(1, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
-TENSOR_DECLARATION(2, src2Buffer, float, src2_ptr, src2_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
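-// Illustrative only: building with "#define SCALE 0.5" makes this shader
-// compute dst = src1 * src2 * 0.5 for every element.
-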
-void main()
-{
- // Get pixels pointer
- Tensor3DIterator src1_iter = CONVERT_TO_TENSOR3D_ITERATOR(src1_attrs, src1_shift);
- Tensor3DIterator src2_iter = CONVERT_TO_TENSOR3D_ITERATOR(src2_attrs, src2_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- float result = LOAD_CURRENT_ITEM(src1_ptr, src1_iter) * LOAD_CURRENT_ITEM(src2_ptr, src2_iter) * float(SCALE);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, result);
-}
diff --git a/src/core/GLES_COMPUTE/cs_shaders/pooling_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/pooling_layer.cs
deleted file mode 100644
index 6ca4265056..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/pooling_layer.cs
+++ /dev/null
@@ -1,1052 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-/** Performs a pooling function
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_FPxx". e.g. "#define DATA_TYPE_FP32"
- * @note The pool size must be passed at compile time using "#define POOLING_LAYER_n". e.g. "#define POOLING_LAYER_2"
- * n must be one of: 2, 3, 7, N
- * If POOLING_LAYER_N is defined, the pool size must also be passed using "#define POOL_SIZE x". e.g. "#define POOL_SIZE 13"
- * @note In case of average pooling the following information must be passed at compile time:
- * POOL_AVG must be provided, otherwise max pooling will be performed.
- * MAX_WIDTH and MAX_HEIGHT which are the maximum accessible indices in the x and y dimensions (width + pad)
- * STRIDE_X and STRIDE_Y which are the steps of the window along the x and y directions
- * PAD_X and PAD_Y which are the pooling paddings in the x and y dimensions
- *
- * @param[in] src_ptr Pointer to the source image. Supported data types: F32/F16
- * @param[in] src_attrs The attributes of the source image
- * @param[out] dst_ptr Pointer to the destination image. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination image
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
-};
-
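-// Illustrative only: a hypothetical build-time configuration for 3x3 average
-// pooling with stride 2 and 1-pixel padding on FP32 data might be:
-//   #define DATA_TYPE_FP32
-//   #define POOLING_LAYER_3
-//   #define POOL_AVG
-//   #define STRIDE_X 2
-//   #define STRIDE_Y 2
-//   #define PAD_X 1
-//   #define PAD_Y 1
-//   #define MAX_WIDTH 225  // width + pad, per the note above, for width 224
-//   #define MAX_HEIGHT 225
-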
-// Common definitions
-#if defined(POOL_AVG) || defined(POOL_L2)
-#define POOL_OP(res, a, b) ((res) = (a) + (b))
-#define POOL_OP_float(res, a, b) (res = a + b)
-#define POOL_OP_vec2(res, a, b) ((res) = (a) + (b))
-#else /* defined(POOL_AVG) || defined(POOL_L2) */
-#define POOL_OP(res, a, b) \
- (res) = (a); \
- if(isnan(a.x) || (a.x < b.x)) \
- { \
- res.x = b.x; \
- } \
- if(isnan(a.y) || (a.y < b.y)) \
- { \
- res.y = b.y; \
- } \
- if(isnan(a.z) || (a.z < b.z)) \
- { \
- res.z = b.z; \
- } \
- if(isnan(a.w) || (a.w < b.w)) \
- { \
- res.w = b.w; \
- }
-#define POOL_OP_float(res, a, b) \
- (res) = (a); \
- if(isnan(a) || (a < b)) \
- { \
- res = b; \
- }
-#define POOL_OP_vec2(res, a, b) \
- (res) = (a); \
- if(isnan(a.x) || (a.x < b.x)) \
- { \
- res.x = b.x; \
- } \
- if(isnan(a.y) || (a.y < b.y)) \
- { \
- res.y = b.y; \
- }
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
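-
-// Note on the max-pooling variants above: each comparison also replaces a NaN
-// in the running maximum (e.g. POOL_OP_float(res, a, b) yields b when a is
-// NaN), so a NaN picked up as the initial value cannot poison the result.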
-
-#if defined(POOL_L2)
-#define POW2_OP(x, vec_size) ((x) * (x))
-#else /* defined(POOL_L2) */
-#define POW2_OP(x, vec_size) (x)
-#endif /* defined(POOL_L2) */
-
-#define DIV_OP(x, y) (x * (1.f / y))
-#define SQRT_OP(x) sqrt((x))
-
-#if defined(DATA_TYPE_FP32)
-
-float calculate_max(const int, Tensor3DIterator, const int, const int, const int, const int, const int, const int);
-float calculate_avg(const int, Tensor3DIterator, const int, const int, const int, const int, const int, const int);
-
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-#if defined(POOL_SIZE)
-// Set the initial value for the pooling operation according to the data type
-#if defined(POOL_AVG) || defined(POOL_L2)
-#define INITIAL_VALUE 0.0f
-#else /* defined(POOL_AVG) || defined(POOL_L2) */
-#define INITIAL_VALUE -3.402823466385289e+38
-#endif // POOL_AVG
-#endif //POOL_SIZE
-
-float calculate_max(const int pool_size, Tensor3DIterator src_iter, const int upper_bound_w, const int upper_bound_h, const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x = int(gl_GlobalInvocationID.x) * stride_x - pad_x;
- int start_y = int(gl_GlobalInvocationID.y) * stride_y - pad_y;
- int end_x = int(min(start_x + pool_size, upper_bound_w));
- int end_y = int(min(start_y + pool_size, upper_bound_h));
-
- float data_max;
- data_max = LOAD_CURRENT_ITEM(src_ptr, src_iter);
-
- for(int i = 0; (start_y + i) < end_y; ++i)
- {
- for(int j = 0; (start_x + j) < end_x; ++j)
- {
- float data = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, j, i, 0));
- POOL_OP_float(data_max, data_max, data);
- }
- }
-
- return data_max;
-}
-
-float calculate_avg(const int pool_size, Tensor3DIterator src_iter, const int upper_bound_w, const int upper_bound_h, const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x = int(gl_GlobalInvocationID.x) * stride_x - pad_x;
- int start_y = int(gl_GlobalInvocationID.y) * stride_y - pad_y;
- int end_x = int(min(start_x + pool_size, upper_bound_w));
- int end_y = int(min(start_y + pool_size, upper_bound_h));
-
- float data_total = 0.0f;
- for(int i = 0; (start_x + i) < end_x; i++)
- {
- for(int j = 0; (start_y + j) < end_y; ++j)
- {
- float data = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, i, j, 0));
- if(isnan(data))
- {
- data = 0.0f;
- }
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data = POW2_OP(data, 1);
-#endif /* defined(POOL_L2) */
- data_total = data_total + data;
- }
- }
-
-#if defined(EXCLUDE_PADDING)
- start_x = max(0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
-
- return data_total / float((end_y - start_y) * (end_x - start_x));
-}
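-
-// Note: with EXCLUDE_PADDING the window start is clamped to 0 before the
-// divisor is computed, so padded elements do not count towards the average.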
-
-#if defined(POOLING_LAYER_2) || defined(POOLING_LAYER_3) || defined(POOLING_LAYER_7)
-
-#if defined(POOLING_LAYER_2)
-#define POOL_SIZE 2
-#elif defined(POOLING_LAYER_3)
-#define POOL_SIZE 3
-#elif defined(POOLING_LAYER_7)
-#define POOL_SIZE 7
-#else // POOLING_LAYER_n
-#error Please define POOLING_LAYER_N instead.
-#endif // POOLING_LAYER_n
-
-void main(void)
-{
- // Get pixels pointer
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- //Load and calculate data
- float res;
-#if defined(POOL_AVG) || defined(POOL_L2)
- res = calculate_avg(POOL_SIZE, src_iter, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y);
-#else /*POOL_AVG*/
- res = calculate_max(POOL_SIZE, src_iter, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y);
-#endif /*POOL_AVG*/
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, res);
-}
-
-#elif defined(POOLING_LAYER_3_OPTIMIZED)
-
-#define POOLING3x3_STRIDE1(res, input_ptr, input_iter) \
- vec4 data00 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0)); \
- vec2 data01 = VLOAD2(vec2, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(4)); \
- vec4 data10 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0)); \
- vec2 data11 = VLOAD2(vec2, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(4)); \
- vec4 data20 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0)); \
- vec2 data21 = VLOAD2(vec2, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(4)); \
- data00 = POW2_OP(data00, 4); \
- data01 = POW2_OP(data01, 2); \
- data10 = POW2_OP(data10, 4); \
- data11 = POW2_OP(data11, 2); \
- data20 = POW2_OP(data20, 4); \
- data21 = POW2_OP(data21, 2); \
- \
- vec4 values000; \
- vec4 values001; \
- vec4 values010; \
- vec4 values100; \
- vec4 values101; \
- vec4 values11; \
- vec4 values200; \
- vec4 values201; \
- vec4 values21; \
- values000.xyzw = data00.xyzy; \
- values001.xyzw = data00.zwzw; \
- values010.x = data01.x; \
- values010.y = data00.w; \
- values010.zw = data01.xy; \
- values100.xyzw = data10.xyzy; \
- values101.xyzw = data10.zwzw; \
- values11.x = data11.x; \
- values11.y = data10.w; \
- values11.zw = data11.xy; \
- values200.xyzw = data20.xyzy; \
- values201.xyzw = data20.zwzw; \
- values21.x = data21.x; \
- values21.y = data20.w; \
- values21.zw = data21.xy; \
- POOL_OP(values000.xyzw, values000.xyzw, values100.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values101.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values11.xyzw); \
- POOL_OP(values000.xyzw, values000.xyzw, values200.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values201.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values21.xyzw); \
- POOL_OP(res.xyzw, vec4(values000.xw, values001.z, values010.y), vec4(values000.y, values001.xw, values010.z)); \
- POOL_OP(res.xyzw, res.xyzw, vec4(values000.z, values001.y, values010.xw))
-
-#define POOLING3x3_STRIDE2(res, input_ptr, input_iter) \
- vec4 data000 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0)); \
- vec4 data001 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(4)); \
- float data010 = LOAD(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(8)); \
- vec4 data100 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0)); \
- vec4 data101 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(4)); \
- float data11 = LOAD(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(8)); \
- vec4 data200 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0)); \
- vec4 data201 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(4)); \
- float data21 = LOAD(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(8)); \
- data000 = POW2_OP(data000, 4); \
- data001 = POW2_OP(data001, 4); \
- data010 = POW2_OP(data010, 1); \
- data100 = POW2_OP(data100, 4); \
- data101 = POW2_OP(data101, 4); \
- data11 = POW2_OP(data11, 1); \
- data200 = POW2_OP(data200, 4); \
- data201 = POW2_OP(data201, 4); \
- data21 = POW2_OP(data21, 1); \
- \
- vec4 values000; \
- vec4 values001; \
- vec4 values010; \
- vec4 values100; \
- vec4 values101; \
- vec4 values11; \
- vec4 values200; \
- vec4 values201; \
- vec4 values21; \
- values000.xyzw = data000.xyzz; \
- values001.xyzw = vec4(data000.w, data001.xxy); \
- values010.xyzw = vec4(data001.zzw, data010); \
- values100.xyzw = data100.xyzz; \
- values101.xyzw = vec4(data100.w, data101.xxy); \
- values11.xyzw = vec4(data101.zzw, data11); \
- values200.xyzw = data200.xyzz; \
- values201.xyzw = vec4(data200.w, data201.xxy); \
- values21.xyzw = vec4(data201.zzw, data21); \
- POOL_OP(values000.xyzw, values000.xyzw, values100.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values101.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values11.xyzw); \
- POOL_OP(values000.xyzw, values000.xyzw, values200.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values201.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values21.xyzw); \
- POOL_OP(res.xyzw, vec4(values000.xw, values001.z, values010.y), vec4(values000.y, values001.xw, values010.z)); \
- POOL_OP(res.xyzw, res.xyzw, vec4(values000.z, values001.y, values010.xw))
-
-#define POOLING3x3_STRIDE3(res, input_ptr, input_iter) \
- vec4 data000 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0)); \
- vec4 data001 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(4)); \
- vec4 data010 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(8)); \
- vec4 data100 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0)); \
- vec4 data101 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(4)); \
- vec4 data11 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(8)); \
- vec4 data200 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0)); \
- vec4 data201 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(4)); \
- vec4 data21 = VLOAD4(vec4, input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(8)); \
- data000 = POW2_OP(data000, 4); \
- data001 = POW2_OP(data001, 4); \
- data010 = POW2_OP(data010, 4); \
- data100 = POW2_OP(data100, 4); \
- data101 = POW2_OP(data101, 4); \
- data11 = POW2_OP(data11, 4); \
- data200 = POW2_OP(data200, 4); \
- data201 = POW2_OP(data201, 4); \
- data21 = POW2_OP(data21, 4); \
- \
- POOL_OP(data000.xyzw, data000.xyzw, data100.xyzw); \
- POOL_OP(data001.xyzw, data001.xyzw, data101.xyzw); \
- POOL_OP(data010.xyzw, data010.xyzw, data11.xyzw); \
- POOL_OP(data000.xyzw, data000.xyzw, data200.xyzw); \
- POOL_OP(data001.xyzw, data001.xyzw, data201.xyzw); \
- POOL_OP(data010.xyzw, data010.xyzw, data21.xyzw); \
- POOL_OP(res.xyzw, vec4(data000.xw, data001.z, data010.y), vec4(data000.y, data001.xw, data010.z)); \
- POOL_OP(res.xyzw, res.xyzw, vec4(data000.z, data001.y, data010.xw))
-
-void main(void)
-{
- // Get pixels pointer
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec4 res;
- // Perform pooling 3x3 for 4 output elements
-#if STRIDE_X == 1
- POOLING3x3_STRIDE1(res, src_ptr, src_iter);
-#elif STRIDE_X == 2
- POOLING3x3_STRIDE2(res, src_ptr, src_iter);
-#elif STRIDE_X == 3
- POOLING3x3_STRIDE3(res, src_ptr, src_iter);
-#endif /*STRIDE_X == 1*/
-
- // Divide by pool region in case of average pooling
-#if defined(POOL_AVG) || defined(POOL_L2)
- ivec4 start_x = ((ivec4(int(gl_GlobalInvocationID.x) * 4) + ivec4(0, 1, 2, 3)) * (ivec4(STRIDE_X))) - (ivec4(PAD_X));
- int start_y = int(gl_GlobalInvocationID.y) * STRIDE_Y - PAD_Y;
- ivec4 end_x = min((start_x + (ivec4(3))), (ivec4(MAX_WIDTH)));
- int end_y = min((start_y + 3), MAX_HEIGHT);
-#if defined(EXCLUDE_PADDING)
- start_x = max(ivec4(0), start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- res *= (vec4((1.f)) / vec4((ivec4(end_y - start_y)) * (end_x - start_x)));
-#endif /*POOL_AVG*/
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, res);
-}
-
-#elif defined(POOLING_LAYER_N)
-
-void main(void)
-{
- // Get pixels pointer
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec4 vdata0 = vec4(INITIAL_VALUE);
- vec4 vdata1 = vec4(INITIAL_VALUE);
- float sdata = float(INITIAL_VALUE);
-
- for(int y = 0; y < int(POOL_SIZE); y++)
- {
- int x = 0;
- for(; x <= (int(POOL_SIZE) - 8); x += 8)
- {
- vec4 data2 = VLOAD4(vec4, src_ptr, TENSOR3D_OFFSET(src_iter, x, y, 0));
- vec4 data3 = VLOAD4(vec4, src_ptr, TENSOR3D_OFFSET(src_iter, x, y, 0) + uint(4));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data2 *= data2;
- data3 *= data3;
-#endif /* defined(POOL_L2) */
-
- POOL_OP(vdata0, vdata0, data2);
- POOL_OP(vdata1, vdata1, data3);
- }
-
- // Leftover
- for(; x < int(POOL_SIZE); ++x)
- {
- float data4 = LOAD(src_ptr, TENSOR3D_OFFSET(src_iter, x, y, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data4 *= data4;
-#endif /* defined(POOL_L2) */
- POOL_OP_float(sdata, sdata, data4);
- }
- }
-
- //Reduce result
- vec4 reduce4;
- POOL_OP(reduce4, vdata0.xyzw, vdata1.xyzw);
- vec2 reduce2;
- POOL_OP_vec2(reduce2, reduce4.xy, reduce4.zw);
- float res;
- POOL_OP_float(res, reduce2.x, reduce2.y);
- POOL_OP_float(res, res, sdata);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- {
- // Divide by pool region in case of average pooling
- int start_x = int(gl_GlobalInvocationID.x) * STRIDE_X - PAD_X;
- int start_y = int(gl_GlobalInvocationID.y) * STRIDE_Y - PAD_Y;
- int end_x = int(min(start_x + POOL_SIZE, MAX_WIDTH));
- int end_y = int(min(start_y + POOL_SIZE, MAX_HEIGHT));
-#if defined(EXCLUDE_PADDING)
- start_x = max(0, start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- float res1 = float((end_y - start_y) * (end_x - start_x));
- res = DIV_OP(res, res1);
- }
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- // Store result
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, res);
-}
-#endif // POOLING_LAYER_N
-
-#elif defined(DATA_TYPE_FP16)
-
-vec2 calculate_max(const int, Tensor3DIterator, const int, const int, const int, const int, const int, const int);
-vec2 calculate_avg(const int, Tensor3DIterator, const int, const int, const int, const int, const int, const int);
-
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-
-#if defined(POOL_SIZE)
-// Set the initial value for the pooling operation according to the data type
-#if defined(POOL_AVG) || defined(POOL_L2)
-#define INITIAL_VALUE 0.0f
-#else /* defined(POOL_AVG) || defined(POOL_L2) */
-#define INITIAL_VALUE -65504.0f
-#endif //POOL_AVG
-#endif //POOL_SIZE
-
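-// Note: in the FP16 path each invocation produces two horizontally adjacent
-// outputs (packed as a vec2), so the helpers below track a second pooling
-// window starting at start_x1 + stride_x and the result is stored with
-// STORE_PACK2_CURRENT_ITEM_HALF.
-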
-vec2 calculate_max(const int pool_size, Tensor3DIterator src_iter, const int upper_bound_w, const int upper_bound_h, const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x1 = int(gl_GlobalInvocationID.x) * stride_x - pad_x;
- int start_y1 = int(gl_GlobalInvocationID.y) * stride_y - pad_y;
- int end_x1 = int(min(start_x1 + pool_size, upper_bound_w));
- int end_y1 = int(min(start_y1 + pool_size, upper_bound_h));
-
- int start_x2 = start_x1 + stride_x;
- int start_y2 = start_y1;
- int end_x2 = int(min(start_x2 + pool_size, upper_bound_w));
- int end_y2 = int(min(start_y2 + pool_size, upper_bound_h));
-
- //Initialize maximum
- vec2 data_max = vec2(0);
-
- //Load and Set initial maximum1
- vec2 data_init1 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- data_max.x = data_init1.x;
-
- //Load and Set initial maximum2
- if(end_x1 < upper_bound_w)
- {
- if((stride_x % 2) == 0)
- {
- vec2 data_init2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, stride_x, 0, 0));
- data_max.y = data_init2.x;
- }
- else
- {
- vec2 data_init2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, stride_x - 1, 0, 0));
- data_max.y = data_init2.y;
- }
- }
-
- for(int i = 0; (start_y1 + i) < end_y1; i++)
- for(int j = 0; (start_x1 + j) < end_x1; j = j + 2)
- {
- //Calculate maximum1
- if((start_x1 + j + 1) < end_x1)
- {
- vec2 data1 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, j, i, 0));
- float data_mr1;
- POOL_OP_float(data_mr1, data1.x, data1.y);
- POOL_OP_float(data_max.x, data_max.x, data_mr1);
- }
- else
- {
- vec2 data1 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, j, i, 0));
- POOL_OP_float(data_max.x, data_max.x, data1.x);
- }
-
- //Calculate maximum2
- if((start_x2 + j) < end_x2 && end_x1 < upper_bound_w)
- {
- if((stride_x % 2) == 0)
- {
- vec2 data2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (j + stride_x), i, 0));
-
- if((start_x2 + j + 1) < end_x2)
- {
- float data_mr2;
- POOL_OP_float(data_mr2, data2.x, data2.y);
- POOL_OP_float(data_max.y, data_max.y, data_mr2);
- }
- else
- {
- POOL_OP_float(data_max.y, data_max.y, data2.x);
- }
- }
- else
- {
- vec2 data2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (j + stride_x - 1), i, 0));
- vec2 data3 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (j + stride_x + 1), i, 0));
- if((start_x2 + j + 1) < end_x2)
- {
- float data_mr2;
- POOL_OP_float(data_mr2, data3.x, data2.y);
- POOL_OP_float(data_max.y, data_max.y, data_mr2);
- }
- else
- {
- POOL_OP_float(data_max.y, data_max.y, data2.y);
- }
- }
- }
- }
- return data_max;
-}
-
-vec2 calculate_avg(const int pool_size, Tensor3DIterator src_iter, const int upper_bound_w, const int upper_bound_h, const int pad_x, const int pad_y, const int stride_x, const int stride_y)
-{
- int start_x1 = (2 * int(gl_GlobalInvocationID.x)) * stride_x - pad_x;
- int start_y1 = int(gl_GlobalInvocationID.y) * stride_y - pad_y;
- int end_x1 = int(min(start_x1 + pool_size, upper_bound_w));
- int end_y1 = int(min(start_y1 + pool_size, upper_bound_h));
-
- int start_x2 = start_x1 + stride_x;
- int start_y2 = start_y1;
- int end_x2 = int(min(start_x2 + pool_size, upper_bound_w));
- int end_y2 = int(min(start_y2 + pool_size, upper_bound_h));
-
- //Initialize sum
- float data_total1 = float(0);
- float data_total2 = float(0);
- for(int i = 0; (start_y1 + i) < end_y1; i++)
- for(int j = 0; (start_x1 + j) < end_x1; j = j + 2)
- {
- vec2 data1 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, j, i, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data1 = POW2_OP(data1, 2);
-#endif /* defined(POOL_L2) */
- //Calculate sum1
- if((start_x1 + j + 1) < end_x1)
- {
- data_total1 = data_total1 + data1.x + data1.y;
- }
- else
- {
- data_total1 = data_total1 + data1.x;
- }
-
- //Calculate sum2
- if((start_x2 + j) < end_x2 && end_x1 <= upper_bound_w)
- {
- if((stride_x % 2) == 0)
- {
- vec2 data2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (j + stride_x), i, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data2 = POW2_OP(data2, 2);
-#endif /* defined(POOL_L2) */
- if((start_x2 + j + 1) < end_x2)
- {
- data_total2 = data_total2 + data2.x + data2.y;
- }
- else
- {
- data_total2 = data_total2 + data2.x;
- }
- }
- else
- {
- vec2 data2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (j + stride_x - 1), i, 0));
- vec2 data3 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (j + stride_x + 1), i, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data2 = POW2_OP(data2, 2);
- data3 = POW2_OP(data3, 2);
-#endif /* defined(POOL_L2) */
- if((start_x2 + j + 1) < end_x2)
- {
- data_total2 = data_total2 + data3.x + data2.y;
- }
- else
- {
- data_total2 = data_total2 + data2.y;
- }
- }
- }
- }
-#if defined(EXCLUDE_PADDING)
- start_x1 = max(0, start_x1);
- start_y1 = max(0, start_y1);
- start_x2 = max(0, start_x2);
- start_y2 = max(0, start_y2);
-#endif /* defined(EXCLUDE_PADDING) */
-
- //Calculate average
- vec2 data_avg;
- data_avg.x = data_total1 / float((end_y1 - start_y1) * (end_x1 - start_x1));
- data_avg.y = data_total2 / float((end_y2 - start_y2) * (end_x2 - start_x2));
-
- return data_avg;
-}
-
-#if defined(POOLING_LAYER_2) || defined(POOLING_LAYER_3) || defined(POOLING_LAYER_7)
-
-#if defined(POOLING_LAYER_2)
-#define POOL_SIZE 2
-#elif defined(POOLING_LAYER_3)
-#define POOL_SIZE 3
-#elif defined(POOLING_LAYER_7)
-#define POOL_SIZE 7
-#else // POOLING_LAYER_n
-#error Please define POOLING_LAYER_N instead.
-#endif // POOLING_LAYER_n
-
-void main(void)
-{
- // Get pixels pointer
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- //Load and calculate data
- vec2 data;
-#if defined(POOL_AVG) || defined(POOL_L2)
- data = calculate_avg(POOL_SIZE, src_iter, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y);
-#else /*POOL_AVG*/
- data = calculate_max(POOL_SIZE, src_iter, MAX_WIDTH, MAX_HEIGHT, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y);
-#endif /*POOL_AVG*/
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- data = SQRT_OP(data);
-#endif /* defined(POOL_L2) */
-
- // Store result
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, data);
-}
-
-#elif defined(POOLING_LAYER_3_OPTIMIZED)
-
-#define POOLING3x3_STRIDE1_fp16(res, input_ptr, input_iter) \
- vec4 data00 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0)); \
- vec2 data01 = LOAD_UNPACK2_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(2)); \
- vec4 data10 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0)); \
- vec2 data11 = LOAD_UNPACK2_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(2)); \
- vec4 data20 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0)); \
- vec2 data21 = LOAD_UNPACK2_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(2)); \
- data00 = POW2_OP(data00, 4); \
- data01 = POW2_OP(data01, 2); \
- data10 = POW2_OP(data10, 4); \
- data11 = POW2_OP(data11, 2); \
- data20 = POW2_OP(data20, 4); \
- data21 = POW2_OP(data21, 2); \
- \
- vec4 values000; \
- vec4 values001; \
- vec4 values010; \
- vec4 values100; \
- vec4 values101; \
- vec4 values11; \
- vec4 values200; \
- vec4 values201; \
- vec4 values21; \
- values000.xyzw = data00.xyzy; \
- values001.xyzw = data00.zwzw; \
- values010.x = data01.x; \
- values010.y = data00.w; \
- values010.zw = data01.xy; \
- values100.xyzw = data10.xyzy; \
- values101.xyzw = data10.zwzw; \
- values11.x = data11.x; \
- values11.y = data10.w; \
- values11.zw = data11.xy; \
- values200.xyzw = data20.xyzy; \
- values201.xyzw = data20.zwzw; \
- values21.x = data21.x; \
- values21.y = data20.w; \
- values21.zw = data21.xy; \
- POOL_OP(values000.xyzw, values000.xyzw, values100.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values101.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values11.xyzw); \
- POOL_OP(values000.xyzw, values000.xyzw, values200.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values201.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values21.xyzw); \
- POOL_OP(res.xyzw, vec4(values000.xw, values001.z, values010.y), vec4(values000.y, values001.xw, values010.z)); \
- POOL_OP(res.xyzw, res.xyzw, vec4(values000.z, values001.y, values010.xw))
-
-#define POOLING3x3_STRIDE2_fp16(res, input_ptr, input_iter) \
- vec4 data000; \
- vec4 data001; \
- float data010; \
- vec4 data100; \
- vec4 data101; \
- float data11; \
- vec4 data200; \
- vec4 data201; \
- float data21; \
- vec2 datamiddle0; \
- vec2 datamiddle1; \
- vec2 datamiddle2; \
- data000 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0)); \
- data001 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(2)); \
- datamiddle0 = LOAD_UNPACK2_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(4)); \
- data010 = datamiddle0.x; \
- data100 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0)); \
- data101 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(2)); \
- datamiddle1 = LOAD_UNPACK2_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(4)); \
- data11 = datamiddle1.x; \
- data200 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0)); \
- data201 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(2)); \
- datamiddle2 = LOAD_UNPACK2_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(4)); \
- data21 = datamiddle2.x; \
- data000 = POW2_OP(data000, 4); \
- data001 = POW2_OP(data001, 4); \
- data010 = POW2_OP(data010, 1); \
- data100 = POW2_OP(data100, 4); \
- data101 = POW2_OP(data101, 4); \
- data11 = POW2_OP(data11, 1); \
- data200 = POW2_OP(data200, 4); \
- data201 = POW2_OP(data201, 4); \
- data21 = POW2_OP(data21, 1); \
- \
- vec4 values000; \
- vec4 values001; \
- vec4 values010; \
- vec4 values100; \
- vec4 values101; \
- vec4 values11; \
- vec4 values200; \
- vec4 values201; \
- vec4 values21; \
- values000.xyzw = data000.xyzz; \
- values001.xyzw = vec4(data000.w, data001.xxy); \
- values010.xyzw = vec4(data001.zzw, data010); \
- values100.xyzw = data100.xyzz; \
- values101.xyzw = vec4(data100.w, data101.xxy); \
- values11.xyzw = vec4(data101.zzw, data11); \
- values200.xyzw = data200.xyzz; \
- values201.xyzw = vec4(data200.w, data201.xxy); \
- values21.xyzw = vec4(data201.zzw, data21); \
- POOL_OP(values000.xyzw, values000.xyzw, values100.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values101.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values11.xyzw); \
- POOL_OP(values000.xyzw, values000.xyzw, values200.xyzw); \
- POOL_OP(values001.xyzw, values001.xyzw, values201.xyzw); \
- POOL_OP(values010.xyzw, values010.xyzw, values21.xyzw); \
- POOL_OP(res.xyzw, vec4(values000.xw, values001.z, values010.y), vec4(values000.y, values001.xw, values010.z)); \
- POOL_OP(res.xyzw, res.xyzw, vec4(values000.z, values001.y, values010.xw))
-
-#define POOLING3x3_STRIDE3_fp16(res, input_ptr, input_iter) \
- vec4 data000 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0)); \
- vec4 data001 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(2)); \
- vec4 data010 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 0, 0) + uint(4)); \
- vec4 data100 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0)); \
- vec4 data101 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(2)); \
- vec4 data11 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 1, 0) + uint(4)); \
- vec4 data200 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0)); \
- vec4 data201 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(2)); \
- vec4 data21 = VLOAD2_UNPACK4_HALF(input_ptr, TENSOR3D_OFFSET(input_iter, 0, 2, 0) + uint(4)); \
- data000 = POW2_OP(data000, 4); \
- data001 = POW2_OP(data001, 4); \
- data010 = POW2_OP(data010, 4); \
- data100 = POW2_OP(data100, 4); \
- data101 = POW2_OP(data101, 4); \
- data11 = POW2_OP(data11, 4); \
- data200 = POW2_OP(data200, 4); \
- data201 = POW2_OP(data201, 4); \
- data21 = POW2_OP(data21, 4); \
- \
- POOL_OP(data000.xyzw, data000.xyzw, data100.xyzw); \
- POOL_OP(data001.xyzw, data001.xyzw, data101.xyzw); \
- POOL_OP(data010.xyzw, data010.xyzw, data11.xyzw); \
- POOL_OP(data000.xyzw, data000.xyzw, data200.xyzw); \
- POOL_OP(data001.xyzw, data001.xyzw, data201.xyzw); \
- POOL_OP(data010.xyzw, data010.xyzw, data21.xyzw); \
- POOL_OP(res.xyzw, vec4(data000.xw, data001.z, data010.y), vec4(data000.y, data001.xw, data010.z)); \
- POOL_OP(res.xyzw, res.xyzw, vec4(data000.z, data001.y, data010.xw))
-
-void main(void)
-{
- // Get pixels pointer
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec4 res;
- // Perform pooling 3x3 for 4 output elements
-#if STRIDE_X == 1
- POOLING3x3_STRIDE1_fp16(res, src_ptr, src_iter);
-#elif STRIDE_X == 2
- POOLING3x3_STRIDE2_fp16(res, src_ptr, src_iter);
-#elif STRIDE_X == 3
- POOLING3x3_STRIDE3_fp16(res, src_ptr, src_iter);
-#endif /*STRIDE_X == 1*/
-
- // Divide by pool region in case of average pooling
-#if defined(POOL_AVG) || defined(POOL_L2)
- ivec4 start_x = ((ivec4(int(gl_GlobalInvocationID.x) * 4) + ivec4(0, 1, 2, 3)) * (ivec4(STRIDE_X))) - (ivec4(PAD_X));
- int start_y = int(gl_GlobalInvocationID.y) * STRIDE_Y - PAD_Y;
- ivec4 end_x = min((start_x + (ivec4(3))), (ivec4(MAX_WIDTH)));
- int end_y = min((start_y + 3), MAX_HEIGHT);
-#if defined(EXCLUDE_PADDING)
- start_x = max(ivec4(0), start_x);
- start_y = max(0, start_y);
-#endif /* defined(EXCLUDE_PADDING) */
- res *= (vec4((1.f)) / vec4((ivec4(end_y - start_y)) * (end_x - start_x)));
-#endif /*POOL_AVG*/
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- res = SQRT_OP(res);
-#endif /* defined(POOL_L2) */
-
- VSTORE2_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, res);
-}
-
-#elif defined(POOLING_LAYER_N)
-
-void main(void)
-{
- // Get pixels pointer
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec4 vdata00 = vec4(INITIAL_VALUE);
- vec4 vdata01 = vec4(INITIAL_VALUE);
- vec4 vdata10 = vec4(INITIAL_VALUE);
- vec4 vdata11 = vec4(INITIAL_VALUE);
- vec2 sdata = vec2(INITIAL_VALUE);
-
- for(int y = 0; y < int(POOL_SIZE); y++)
- {
- int x = 0;
- for(; x <= (int(POOL_SIZE) - 8); x += 8)
- {
- vec4 data2 = VLOAD2_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x, y, 0));
- vec4 data3 = VLOAD2_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x, y, 0) + uint(2));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data2 *= data2;
- data3 *= data3;
-#endif /* defined(POOL_L2) */
-
- POOL_OP(vdata00, vdata00, data2);
- POOL_OP(vdata10, vdata10, data3);
- }
-
- // Leftover
- for(; x < int(POOL_SIZE); x = x + 2)
- {
- vec2 data4middle = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x, y, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data4middle *= data4middle;
-#endif /* defined(POOL_L2) */
- if((x + 1) >= int(POOL_SIZE))
- {
- POOL_OP_float(sdata.x, sdata.x, data4middle.x);
- }
- else
- {
- float data4;
- POOL_OP_float(data4, data4middle.x, data4middle.y);
- POOL_OP_float(sdata.x, sdata.x, data4);
- }
- }
- }
-
- for(int y = 0; y < int(POOL_SIZE); y++)
- {
- if((STRIDE_X % 2) == 0)
- {
- int x1 = STRIDE_X;
- for(; x1 <= (int(POOL_SIZE + STRIDE_X) - 8); x1 += 8)
- {
- vec4 data2 = VLOAD2_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x1, y, 0));
- vec4 data3 = VLOAD2_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x1, y, 0) + uint(2));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data2 *= data2;
- data3 *= data3;
-#endif /* defined(POOL_L2) */
-
- POOL_OP(vdata01, vdata01, data2);
- POOL_OP(vdata11, vdata11, data3);
- }
-
- // Leftover
- for(; x1 < int(POOL_SIZE + STRIDE_X); x1 = x1 + 2)
- {
- vec2 data4middle;
- data4middle = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x1, y, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data4middle *= data4middle;
-#endif /* defined(POOL_L2) */
- if((x1 + 1) >= int(POOL_SIZE + STRIDE_X))
- {
- POOL_OP_float(sdata.y, sdata.y, data4middle.x);
- }
- else
- {
- float data4;
- POOL_OP_float(data4, data4middle.x, data4middle.y);
- POOL_OP_float(sdata.y, sdata.y, data4);
- }
- }
- }
- else
- {
- vec2 dataorigin2;
- dataorigin2 = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, (STRIDE_X - 1), y, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- dataorigin2.y *= dataorigin2.y;
-#endif /* defined(POOL_L2) */
- POOL_OP_float(sdata.y, sdata.y, dataorigin2.y);
-
- int x1 = STRIDE_X + 1;
- for(; x1 <= (int(POOL_SIZE + STRIDE_X) - 8); x1 += 8)
- {
- vec4 data2 = VLOAD2_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x1, y, 0));
- vec4 data3 = VLOAD2_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x1, y, 0) + uint(2));
-
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data2 *= data2;
- data3 *= data3;
-#endif /* defined(POOL_L2) */
-
- POOL_OP(vdata01, vdata01, data2);
- POOL_OP(vdata11, vdata11, data3);
- }
-
- // Leftover
- for(; x1 < int(POOL_SIZE + STRIDE_X); x1 = x1 + 2)
- {
- vec2 data4middle = LOAD_UNPACK2_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, x1, y, 0));
-#if defined(POOL_L2)
- // Raise to power of 2 for L2 Pooling
- data4middle *= data4middle;
-#endif /* defined(POOL_L2) */
- if((x1 + 1) >= int(POOL_SIZE + STRIDE_X))
- {
- POOL_OP_float(sdata.y, sdata.y, data4middle.x);
- }
- else
- {
- float data4;
- POOL_OP_float(data4, data4middle.x, data4middle.y);
- POOL_OP_float(sdata.y, sdata.y, data4);
- }
- }
- }
- }
-
- // Reduce result
- vec4 reduce40;
- POOL_OP(reduce40, vdata00.xyzw, vdata10.xyzw);
- vec2 reduce20;
- POOL_OP_vec2(reduce20, reduce40.xy, reduce40.zw);
- vec4 reduce41;
- POOL_OP(reduce41, vdata01.xyzw, vdata11.xyzw);
- vec2 reduce21;
- POOL_OP_vec2(reduce21, reduce41.xy, reduce41.zw);
- vec2 data;
- POOL_OP_float(data.x, reduce20.x, reduce20.y);
- POOL_OP_float(data.x, data.x, sdata.x);
- POOL_OP_float(data.y, reduce21.x, reduce21.y);
- POOL_OP_float(data.y, data.y, sdata.y);
-
-#if defined(POOL_AVG) || defined(POOL_L2)
- {
- // Divide by pool region in case of average pooling
- int start_x1 = (2 * int(gl_GlobalInvocationID.x)) * STRIDE_X - PAD_X;
- int start_y1 = int(gl_GlobalInvocationID.y) * STRIDE_Y - PAD_Y;
- int end_x1 = int(min(start_x1 + POOL_SIZE, MAX_WIDTH));
- int end_y1 = int(min(start_y1 + POOL_SIZE, MAX_HEIGHT));
- int start_x2 = start_x1 + STRIDE_X;
- int start_y2 = start_y1;
- int end_x2 = int(min(start_x2 + POOL_SIZE, MAX_WIDTH));
- int end_y2 = int(min(start_y2 + POOL_SIZE, MAX_HEIGHT));
-#if defined(EXCLUDE_PADDING)
- start_x1 = max(0, start_x1);
- start_y1 = max(0, start_y1);
- start_x2 = max(0, start_x2);
- start_y2 = max(0, start_y2);
-#endif /* defined(EXCLUDE_PADDING) */
- vec2 res1;
- res1.x = float((end_y1 - start_y1) * (end_x1 - start_x1));
- res1.y = float((end_y2 - start_y2) * (end_x2 - start_x2));
- data.x = DIV_OP(data.x, res1.x);
- data.y = DIV_OP(data.y, res1.y);
- }
-#endif /* defined(POOL_AVG) || defined(POOL_L2) */
-
-#if defined(POOL_L2)
- // Take square root of the result in L2 pooling
- data = SQRT_OP(data);
-#endif /* defined(POOL_L2) */
-
- // Store result
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, data);
-}
-#endif // POOLING_LAYER_N
-
-#else // DATA_TYPE_FP32
-#error Data type not supported
-#endif // DATA_TYPE_FP32
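For reference, a minimal scalar sketch of the average/L2 pooling divisor logic used in the POOL_AVG/POOL_L2 path above: the pool region is clamped to the tensor bounds, and with EXCLUDE_PADDING the start coordinates are additionally clamped to 0. Plain C, with illustrative names (pool_region_size is not part of the library API).

    /* Sketch of the average-pooling divisor computation, assuming plain C. */
    #include <stdio.h>

    static int min_i(int a, int b) { return a < b ? a : b; }
    static int max_i(int a, int b) { return a > b ? a : b; }

    /* Returns the number of elements the pooled sum is divided by. */
    int pool_region_size(int out_x, int out_y, int stride_x, int stride_y,
                         int pad_x, int pad_y, int pool_size,
                         int max_width, int max_height, int exclude_padding)
    {
        int start_x = out_x * stride_x - pad_x;
        int start_y = out_y * stride_y - pad_y;
        int end_x   = min_i(start_x + pool_size, max_width);
        int end_y   = min_i(start_y + pool_size, max_height);
        if(exclude_padding)
        {
            start_x = max_i(0, start_x);
            start_y = max_i(0, start_y);
        }
        return (end_y - start_y) * (end_x - start_x);
    }

    int main(void)
    {
        /* 3x3 pool at the top-left corner of a 10x10 map with 1-pixel padding:
         * only a 2x2 sub-region is inside the tensor, so the divisor is 4. */
        printf("%d\n", pool_region_size(0, 0, 2, 2, 1, 1, 3, 10, 10, 1));
        return 0;
    }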
diff --git a/src/core/GLES_COMPUTE/cs_shaders/scale.cs b/src/core/GLES_COMPUTE/cs_shaders/scale.cs
deleted file mode 100644
index 63be478053..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/scale.cs
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2016-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-// We DO have to use highp float here to calculate the coordinates of the source tensor for DATA_TYPE_FP16. float is highp by default, but we still write it down here to make it clearer; mediump is only used for the src/dst tensor data in the shader body.
-precision highp float;
-
-/** Performs an affine transformation on a tensor, interpolating with the NEAREST NEIGHBOUR method. Input and output are single-channel FP16.
- *
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: FP16.
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: FP16. (Must be the same as the input)
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] input_width Input tensor width
- * @param[in] input_height Input tensor height
- * @param[in] scale The scale factors along the x/y dimensions
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- float input_width;
- float input_height;
- vec2 scale;
-};
-
-#if defined(DATA_TYPE_FP16)
-#if defined(SCALE_NEAREST_GENERIC)
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-
-vec4[2] transform_nearest(vec2 coord, vec2 scale)
-{
- vec4 in_x_coords = vec4(coord.x, 1.f + coord.x, 2.f + coord.x, 3.f + coord.x);
-
- vec4[2] t;
-#if defined(SAMPLING_POLICY_CENTER) /* SAMPLING_POLICY_CENTER */
- t[0] = (in_x_coords + (vec4(0.5f))) * scale.x;
- t[1] = vec4((coord.y + 0.5f) * scale.y);
-#elif defined(SAMPLING_POLICY_TOP_LEFT) /* SAMPLING_POLICY_TOP_LEFT */
- t[0] = in_x_coords * scale.x;
- t[1] = vec4(coord.y) * scale.y;
-#else /* Unsupported sampling policy */
-#error Unsupported sampling policy
-#endif /* SAMPLING_POLICY */
-
- return t;
-}
-
-vec4[2] clamp_to_border_with_size(vec4[2] coords, float width, float height, float border_size)
-{
- vec4[2] c;
- c[0] = clamp(coords[0], 0.0f - border_size, width - 1.f + border_size);
- c[1] = clamp(coords[1], 0.0f - border_size, height - 1.f + border_size);
-
- return c;
-}
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- vec4[2] tc = clamp_to_border_with_size(transform_nearest(vec2(gl_GlobalInvocationID.x << uint(2), gl_GlobalInvocationID.y), scale), input_width, input_height, float(BORDER_SIZE));
-
- mediump vec2 s = vec2(0.0f);
- mediump vec4 d = vec4(0.0f);
-
- for(int i = 0; i < 4; i++)
- {
- uint offset_in_bytes = tensor3D_offset_in_bytes(src_iter, int(tc[0][i]), int(tc[1][i]), int(gl_GlobalInvocationID.z));
-
- s = LOAD_UNPACK2_HALF(src_ptr, uint(offset_in_bytes >> src_shift));
-
- if(offset_in_bytes % uint(4) == uint(0))
- {
- d[i] = s.x;
- }
- else
- {
- d[i] = s.y;
- }
- }
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, d);
-}
-#elif defined(SCALE_NEAREST_8X) /* SCALE_NEAREST_GENERIC */
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR_NO_STEP(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-
- uvec2 tc = uvec2(gl_GlobalInvocationID.x << uint(2), gl_GlobalInvocationID.y >> uint(1));
-
- mediump vec4 s = vec4(0.0f);
- mediump vec4[2] d;
-
- s = LOAD_UNPACK4_HALF(src_ptr, TENSOR3D_OFFSET(src_iter, int(tc[0]), int(tc[1]), int(gl_GlobalInvocationID.z)));
-
- d[0] = vec4(s.x, s.x, s.y, s.y);
- d[1] = vec4(s.z, s.z, s.w, s.w);
-
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, d);
-}
-#endif /* SCALE_NEAREST_GENERIC */
-
-#else /* DATA_TYPE_FP16 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP16 */
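The nearest-neighbour source-coordinate mapping computed by transform_nearest() above reduces, per output pixel, to one multiply plus an optional half-texel offset under SAMPLING_POLICY_CENTER, followed by the border clamp of clamp_to_border_with_size(). A scalar C sketch under those assumptions (src_coord is an illustrative name, not a library function):

    /* Sketch of the per-pixel nearest-neighbour coordinate mapping, in C. */
    #include <stdio.h>

    static float clampf(float v, float lo, float hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    float src_coord(int dst_x, float scale_x, float width, float border_size, int center)
    {
        /* Half-texel offset only under the CENTER sampling policy. */
        float x = center ? ((float)dst_x + 0.5f) * scale_x : (float)dst_x * scale_x;
        /* Clamp to the valid range extended by the border size. */
        return clampf(x, 0.0f - border_size, width - 1.0f + border_size);
    }

    int main(void)
    {
        /* Upscale by 2x (scale = 0.5): output pixel 5 maps to source
         * coordinate 2.75, which the shader's int() cast truncates to texel 2. */
        printf("%f\n", src_coord(5, 0.5f, 100.0f, 0.0f, 1));
        return 0;
    }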
diff --git a/src/core/GLES_COMPUTE/cs_shaders/softmax_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/softmax_layer.cs
deleted file mode 100644
index 0293943da1..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/softmax_layer.cs
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2017 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-// Common definitions
-#define MAX_OP(x, y) max((x), (y))
-#define ADD_OP(x, y) ((x) + (y))
-#define SUB_OP(x, y) ((x) - (y))
-#define DIV_OP(x, y) ((x) / (y))
-#define EXP_OP(x) exp((x))
-
-const float float_min = -1.0 / 0.0; // negative infinity
-const vec4 vec4_min = vec4(float_min);
-
-#ifdef SOFTMAX_LAYER_MAX
-
-/** Identifies the maximum value across the 1st dimension.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note In case the input width is not a multiple of 8, NON_MULTIPLE_OF_8 must be passed.
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[in] width Input image width
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes dst_attrs;
- uint width;
-};
-
-#if defined(DATA_TYPE_FP32)
-
-TENSOR_DECLARATION(1, srcBuffer, vec4[2], src_ptr, src_shift, 5, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- // Initialize local maximum
- vec4 max_val = vec4_min;
-
- // Calculate max of row
- uint width3 = width >> 3;
- for(int i = 0; i < int(width3); i++)
- {
- vec4 data[2] = LOAD(src_ptr, IMAGE_OFFSET(src_iter, i << 3, 0));
- max_val = MAX_OP(data[0], max_val);
- max_val = MAX_OP(data[1], max_val);
- }
-
-#ifdef NON_MULTIPLE_OF_8
- // Handle non multiple of 8
- vec4 data[2] = LOAD(src_ptr, IMAGE_OFFSET(src_iter, width3 << 3, 0));
- int idx = 0;
- if(width >> 2 != width3 << 1)
- {
- max_val = MAX_OP(data[0], max_val);
- idx = 1;
- }
- for(int i = 0; i < int(width) % 4; i++)
- {
- max_val.x = MAX_OP(data[idx][i], max_val.x);
- }
-#endif /* NON_MULTIPLE_OF_8 */
-
- // Perform max reduction
- max_val.xy = MAX_OP(max_val.xy, max_val.zw);
- max_val.x = MAX_OP(max_val.x, max_val.y);
-
- // Store result
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, max_val.x);
-}
-#elif defined(DATA_TYPE_FP16)
-
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uint, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
-
- // Initialize local maximum
- vec4 max_val = vec4_min;
-
- // Calculate max of row
- uint width3 = width >> 3;
- for(int i = 0; i < int(width3); i++)
- {
- vec4 data[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, i << 3, 0));
- max_val = MAX_OP(data[0], max_val);
- max_val = MAX_OP(data[1], max_val);
- }
-
-#ifdef NON_MULTIPLE_OF_8
- // Handle non multiple of 8
- vec4 data[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, width3 << 3, 0));
- int idx = 0;
- if(width >> 2 != width3 << 1)
- {
- max_val = MAX_OP(data[0], max_val);
- idx = 1;
- }
- for(int i = 0; i < int(width) % 4; i++)
- {
- max_val.x = MAX_OP(data[idx][i], max_val.x);
- }
-#endif /* NON_MULTIPLE_OF_8 */
-
- // Perform max reduction
- max_val.xy = MAX_OP(max_val.xy, max_val.zw);
- max_val.x = MAX_OP(max_val.x, max_val.y);
-
- STORE_PACK2_CURRENT_ITEM_HALF(dst_ptr, dst_iter, max_val.xy);
-}
-#else // DATA_TYPE_FP32
-#error Data type not supported
-#endif // DATA_TYPE_FP32
-#elif defined(SOFTMAX_LAYER_SHIFT_EXP_SUM)
-
-/** Shifts the values of the input tensor by the max calculated in the softmax_layer_max kernel,
- * then takes the exponent of each element and sums all elements across each row.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note In case the input width is not a multiple of 8, NON_MULTIPLE_OF_8 must be passed.
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[in] max_ptr Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] max_attrs The attributes of the max values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- * @param[out] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] sum_attrs The attributes of the sum values tensor
- * @param[in] width Input image width
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes max_attrs;
- Tensor3DAttributes dst_attrs;
- Tensor3DAttributes sum_attrs;
- uint width;
-};
-#if defined(DATA_TYPE_FP32)
-
-TENSOR_DECLARATION(1, srcBuffer, vec4[2], src_ptr, src_shift, 5, readonly);
-TENSOR_DECLARATION(2, maxBuffer, float, max_ptr, max_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, vec4[2], dst_ptr, dst_shift, 5, writeonly);
-TENSOR_DECLARATION(4, sumBuffer, float, sum_ptr, sum_shift, 2, writeonly);
-
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
- ImageIterator max_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(max_attrs, max_shift);
- ImageIterator sum_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(sum_attrs, sum_shift);
-
- // Load max value of 1D logits vector (row)
- vec4 max_val = vec4(LOAD_CURRENT_ITEM(max_ptr, max_iter));
-
- // Set sum vector
- vec4 sum1D = vec4(0);
-
- // Shift values, exp and sum
- uint width3 = width >> 3;
- for(int i = 0; i < int(width3); i++)
- {
- vec4 data[2];
- data = LOAD(src_ptr, IMAGE_OFFSET(src_iter, i << 3, 0));
- data[0] = SUB_OP(data[0], max_val);
- data[1] = SUB_OP(data[1], max_val);
- data[0] = EXP_OP(data[0]);
- data[1] = EXP_OP(data[1]);
- STORE(dst_ptr, IMAGE_OFFSET(dst_iter, i << 3, 0), data);
- sum1D = ADD_OP(sum1D, data[0]);
- sum1D = ADD_OP(sum1D, data[1]);
- }
-
-#ifdef NON_MULTIPLE_OF_8
- // Handle non multiple of 8
- vec4 data[2] = LOAD(src_ptr, IMAGE_OFFSET(src_iter, width3 << 3, 0));
- int idx = 0;
- if(width >> 2 != width3 << 1)
- {
- data[0] = SUB_OP(data[0], max_val);
- data[0] = EXP_OP(data[0]);
- sum1D = ADD_OP(sum1D, data[0]);
- idx = 1;
- }
- for(int i = 0; i < int(width) % 4; i++)
- {
- data[idx][i] = SUB_OP(data[idx][i], max_val.x);
- data[idx][i] = EXP_OP(data[idx][i]);
- sum1D.x = ADD_OP(sum1D.x, data[idx][i]);
- }
- STORE(dst_ptr, IMAGE_OFFSET(dst_iter, width3 << 3, 0), data);
-#endif /* NON_MULTIPLE_OF_8 */
-
- // Perform sum reduction
- sum1D.xy = ADD_OP(sum1D.xy, sum1D.zw);
- sum1D.x = ADD_OP(sum1D.x, sum1D.y);
-
- // Calculate and store result
- STORE_CURRENT_ITEM(sum_ptr, sum_iter, sum1D.x);
-}
-#elif defined(DATA_TYPE_FP16)
-
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, maxBuffer, uint, max_ptr, max_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-TENSOR_DECLARATION(4, sumBuffer, uint, sum_ptr, sum_shift, 2, writeonly);
-
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
- ImageIterator max_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(max_attrs, max_shift);
- ImageIterator sum_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(sum_attrs, sum_shift);
-
- // Load max value of 1D logits vector (row)
- vec2 datamaxinit = LOAD_UNPACK2_CURRENT_ITEM_HALF(max_ptr, max_iter);
- vec4 max_val = vec4(datamaxinit.x);
-
- // Set sum vector
- vec4 sum1D = vec4(0.f);
-
- // Shift values, exp and sum
- uint width3 = width >> 3;
- for(int i = 0; i < int(width3); i++)
- {
- vec4 data[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, i << 3, 0));
- data[0] = SUB_OP(data[0], max_val);
- data[1] = SUB_OP(data[1], max_val);
- data[0] = EXP_OP(data[0]);
- data[1] = EXP_OP(data[1]);
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, i << 3, 0), data);
- sum1D = ADD_OP(sum1D, data[0]);
- sum1D = ADD_OP(sum1D, data[1]);
- }
-
-#ifdef NON_MULTIPLE_OF_8
- // Handle non multiple of 8
- vec4 data[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, width3 << 3, 0));
- int idx = 0;
- if(width >> 2 != width3 << 1)
- {
- data[0] = SUB_OP(data[0], max_val);
- data[0] = EXP_OP(data[0]);
- sum1D = ADD_OP(sum1D, data[0]);
- idx = 1;
- }
- for(int i = 0; i < int(width) % 4; i++)
- {
- data[idx][i] = SUB_OP(data[idx][i], max_val.x);
- data[idx][i] = EXP_OP(data[idx][i]);
- sum1D.x = ADD_OP(sum1D.x, data[idx][i]);
- }
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, width3 << 3, 0), data);
-#endif /* NON_MULTIPLE_OF_8 */
- // Perform sum reduction
- sum1D.xy = ADD_OP(sum1D.xy, sum1D.zw);
- sum1D.x = ADD_OP(sum1D.x, sum1D.y);
-
- // Calculate and store result
- STORE_PACK2_CURRENT_ITEM_HALF(sum_ptr, sum_iter, sum1D.xy);
-}
-#else // DATA_TYPE_FP32
-#error Data type not supported
-#endif // DATA_TYPE_FP32
-#elif defined(SOFTMAX_LAYER_NORM)
-
-/** Divides all the values of the input tensor by the sum calculated by the softmax_layer_shift_exp_sum kernel.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- *
- * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[in] sum_ptr Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
- * @param[in] sum_attrs The attributes of the sum values tensor
- * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination tensor
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- Tensor3DAttributes sum_attrs;
- Tensor3DAttributes dst_attrs;
-};
-#if defined(DATA_TYPE_FP32)
-TENSOR_DECLARATION(1, srcBuffer, vec4[2], src_ptr, src_shift, 5, readonly);
-TENSOR_DECLARATION(2, sumBuffer, float, sum_ptr, sum_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, vec4[2], dst_ptr, dst_shift, 5, writeonly);
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
- ImageIterator sum_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(sum_attrs, sum_shift);
-
- // Load sum value of 1D logits vector (row)
- vec4 sum_val = vec4(LOAD(sum_ptr, IMAGE_OFFSET(sum_iter, 0, gl_GlobalInvocationID.y)));
-
- vec4 data[2] = LOAD_CURRENT_ITEM(src_ptr, src_iter);
- data[0] = DIV_OP(data[0], sum_val);
- data[1] = DIV_OP(data[1], sum_val);
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, data);
-}
-#elif defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, sumBuffer, uint, sum_ptr, sum_shift, 2, readonly);
-TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);
- ImageIterator sum_iter = CONVERT_TENSOR3D_TO_IMAGE_ITERATOR_NO_STEP(sum_attrs, sum_shift);
-
- // Load sum value of 1D logits vector (row)
- vec4 sum_val = vec4(LOAD_UNPACK2_HALF(sum_ptr, IMAGE_OFFSET(sum_iter, 0, gl_GlobalInvocationID.y)).x);
-
- vec4 data[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
- data[0] = DIV_OP(data[0], sum_val);
- data[1] = DIV_OP(data[1], sum_val);
- STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, data);
-}
-#else // DATA_TYPE_FP32
-#error Data type not supported
-#endif // DATA_TYPE_FP32
-#endif // SOFTMAX_LAYER_MAX
diff --git a/src/core/GLES_COMPUTE/cs_shaders/tensor_shift.cs b/src/core/GLES_COMPUTE/cs_shaders/tensor_shift.cs
deleted file mode 100644
index cd2dcdeb5b..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/tensor_shift.cs
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2017-2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-/** This kernel shifts the tensor content "pad_x" columns to the right.
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note The width must be passed at compile time using "#define WIDTH n" e.g. "#define WIDTH 1"
- *
- * @param[in,out] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32
- * @param[in] src_attrs The attributes of the source tensor
- * @param[in,out] pad_x The padding of the source tensor in the x dimension
- */
-SHADER_PARAMS_DECLARATION
-{
- Tensor3DAttributes src_attrs;
- uint pad_x;
-};
-
-#if defined(DATA_TYPE_FP16)
-TENSOR_DECLARATION(1, srcBuffer, uint, src_ptr, src_shift, 2, restrict);
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- int n = int(pad_x) % 2;
-
- if(n == 1)
- {
- int i = 0;
- if((WIDTH % 2) == 1)
- {
- i = WIDTH + int(pad_x) - 2;
- }
- else
- {
- vec2 s0_end = LOAD_UNPACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (WIDTH - 2))));
- vec2 s_end = vec2(s0_end.y, 0.f);
- STORE_PACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (WIDTH + int(pad_x) - 1))), s_end);
- i = WIDTH + int(pad_x) - 3;
- }
- for(; i >= (int(pad_x) + 1); i = i - 2)
- {
- vec2 s0 = LOAD_UNPACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (i - int(pad_x) - 1))));
- vec2 s1 = LOAD_UNPACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (i - int(pad_x) + 1))));
- vec2 s = vec2(s0.y, s1.x);
- STORE_PACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * i)), s);
- }
- for(int j = 0; j < (int(pad_x) - 1); j = j + 2)
- {
- vec2 s_origin = vec2(0.f);
- STORE_PACK2_CURRENT_ITEM_HALF(src_ptr, src_iter, s_origin);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, 4);
- }
- vec2 s0_origin = LOAD_UNPACK2_CURRENT_ITEM_HALF(src_ptr, src_iter);
- vec2 s_origin = vec2(0.f, s0_origin.x);
- STORE_PACK2_CURRENT_ITEM_HALF(src_ptr, src_iter, s_origin);
- }
- else
- {
- int i = 0;
- if((WIDTH % 2) == 0)
- {
- i = WIDTH + int(pad_x) - 2;
- }
- else
- {
- vec2 s0_end = LOAD_UNPACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (WIDTH - 1))));
- vec2 s_end = vec2(s0_end.x, 0.f);
- STORE_PACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (WIDTH + int(pad_x) - 1))), s_end);
- i = WIDTH + int(pad_x) - 3;
- }
- for(; i >= (int(pad_x)); i = i - 2)
- {
- vec2 s = LOAD_UNPACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * (i - int(pad_x)))));
- STORE_PACK2_HALF(src_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src_iter, (2 * i)), s);
- }
- for(int j = 0; j < int(pad_x); j = j + 2)
- {
- vec2 s = vec2(0.f);
- STORE_PACK2_CURRENT_ITEM_HALF(src_ptr, src_iter, s);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, 4);
- }
- }
-}
-#elif defined(DATA_TYPE_FP32)
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, restrict);
-
-void main()
-{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
-
- for(int i = (WIDTH + int(pad_x) - 1); i >= int(pad_x); i--)
- {
- float sorigin = LOAD(src_ptr, TENSOR_OFFSET_ADVANCE(src_iter, (i - int(pad_x))));
- STORE(src_ptr, TENSOR_OFFSET_ADVANCE(src_iter, i), sorigin);
- }
- for(int j = 0; j < int(pad_x); j++)
- {
- STORE_CURRENT_ITEM(src_ptr, src_iter, 0.f);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(src_iter, 4);
- }
-}
-#else /* DATA_TYPE_FP16 */
-#error Data type not supported
-#endif /* DATA_TYPE_FP16 */
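The in-place shift above copies back-to-front so no element is overwritten before it is read, then zeroes the vacated leading slots; the FP16 path does the same with packed 2-element loads/stores and the pad_x parity cases. A scalar C sketch of the FP32 logic (shift_row_right is an illustrative name):

    /* Sketch of the in-place right shift performed by tensor_shift.cs, in C. */
    #include <stdio.h>

    void shift_row_right(float *row, int width, int pad_x)
    {
        /* Copy back-to-front to avoid clobbering unread elements. */
        for(int i = width + pad_x - 1; i >= pad_x; i--)
            row[i] = row[i - pad_x];
        /* Zero the vacated leading slots. */
        for(int j = 0; j < pad_x; j++)
            row[j] = 0.0f;
    }

    int main(void)
    {
        float row[6] = { 1.f, 2.f, 3.f, 4.f, 0.f, 0.f };
        shift_row_right(row, 4, 2); /* row becomes 0 0 1 2 3 4 */
        for(int i = 0; i < 6; i++)
            printf("%g ", row[i]);
        printf("\n");
        return 0;
    }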
diff --git a/src/core/GLES_COMPUTE/cs_shaders/transpose.cs b/src/core/GLES_COMPUTE/cs_shaders/transpose.cs
deleted file mode 100755
index 72ade20c80..0000000000
--- a/src/core/GLES_COMPUTE/cs_shaders/transpose.cs
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Copyright (c) 2017, 2018 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
-
-#include "helpers_cs.h"
-
-#if defined(DATA_TYPE_FP16)
-precision mediump float;
-#endif // DATA_TYPE_FP16
-
-#define SWAP_ROW_func(u0, l0) \
- { \
- tmp_swap = u0; \
- u0 = l0; \
- l0 = tmp_swap; \
- }
-
-#define SWAP_4x4_func(u0, u1, u2, u3, l0, l1, l2, l3) \
- { \
- vec4 tmp_swap; \
- SWAP_ROW_func(u0, l0); \
- SWAP_ROW_func(u1, l1); \
- SWAP_ROW_func(u2, l2); \
- SWAP_ROW_func(u3, l3); \
- }
-
-#define TRANSPOSE_4x4_func(u0, u1, u2, u3) \
- { \
- mat4x4 matin, matout; \
- matin[0] = u0; \
- matin[1] = u1; \
- matin[2] = u2; \
- matin[3] = u3; \
- matout = transpose(matin); \
- u0 = matout[0]; \
- u1 = matout[1]; \
- u2 = matout[2]; \
- u3 = matout[3]; \
- }
-
-/** This OpenGL ES kernel computes the matrix transposition of the input matrix
- *
- * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
- * @note For F16, the optimization name must be passed at compile time using "#define OPTIMIZATION_NAME". e.g. "#define TRANSPOSE_8X8"
- *
- * @param[in] src_ptr Pointer to the source matrix. Supported data types: F32/F16
- * @param[in] src_attrs The attributes of the source matrix
- * @param[out] dst_ptr Pointer to the destination matrix. Supported data types: same as @p src_ptr
- * @param[in] dst_attrs The attributes of the destination matrix
- */
-SHADER_PARAMS_DECLARATION
-{
- ImageAttributes src_attrs;
- ImageAttributes dst_attrs;
-};
-
-#ifdef DATA_TYPE_FP32
-TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
-TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
-
-void main(void)
-{
- // compute source address
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- // load the NxN block at (x, y)
- vec4 u0 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 0));
- vec4 u1 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- vec4 u2 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- vec4 u3 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
-
- // transpose the block
- TRANSPOSE_4x4_func(u0, u1, u2, u3);
-
- // store the block at (y, x)
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(16) * uint(gl_GlobalInvocationID.y) + uint(4) * uint(gl_GlobalInvocationID.x) * (dst_attrs.stride_y));
-
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), u0);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), u1);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), u2);
- VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), u3);
-}
-
-#elif defined(DATA_TYPE_FP16) /* DATA_TYPE_FP16 */
-
-#if defined(TRANSPOSE_4X4)
-TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
-
-void main(void)
-{
- // compute source address
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- // load the NxN block at (x, y)
- vec4 u0 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 0));
- vec4 u1 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
- vec4 u2 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
- vec4 u3 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));
-
- // transpose the block
- TRANSPOSE_4x4_func(u0, u1, u2, u3);
-
- // store the block at (y, x)
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(8) * uint(gl_GlobalInvocationID.y) + uint(gl_GlobalInvocationID.x) * (dst_attrs.step_y));
-
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), u0);
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), u1);
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), u2);
- STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), u3);
-}
-
-#elif defined(TRANSPOSE_8X8) /* TRANSPOSE_8X8 */
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main(void)
-{
- // compute source address
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- vec4 u[8][2];
-
- for(int i = 0; i < 8; i++)
- {
- u[i] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, i));
- }
-
- // transpose the block
- TRANSPOSE_4x4_func(u[0][0], u[1][0], u[2][0], u[3][0]);
- TRANSPOSE_4x4_func(u[0][1], u[1][1], u[2][1], u[3][1]);
- TRANSPOSE_4x4_func(u[4][0], u[5][0], u[6][0], u[7][0]);
- TRANSPOSE_4x4_func(u[4][1], u[5][1], u[6][1], u[7][1]);
- SWAP_4x4_func(u[0][1], u[1][1], u[2][1], u[3][1], u[4][0], u[5][0], u[6][0], u[7][0]);
-
- // store the block at (y, x)
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(16) * uint(gl_GlobalInvocationID.y) + uint(gl_GlobalInvocationID.x) * (dst_attrs.step_y));
-
- for(int i = 0; i < 8; i++)
- {
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, i), u[i]);
- }
-}
-
-#elif defined(TRANSPOSE_8X8_SQUARE) /* TRANSPOSE_8x8_SQUARE */
-TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
-TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);
-
-void main(void)
-{
- ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
- ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);
-
- if(gl_GlobalInvocationID.x <= gl_GlobalInvocationID.y)
- {
- uint blk1_offset_in_bytes = CURRENT_ITEM_OFFSET_IN_BYTES(src_iter);
- TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(16) * uint(gl_GlobalInvocationID.y) + uint(gl_GlobalInvocationID.x) * (dst_attrs.step_y));
- uint blk2_offset_in_bytes = CURRENT_ITEM_OFFSET_IN_BYTES(dst_iter);
-
- // load block1
- vec4 u1[8][2];
-
- SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(src_iter, blk1_offset_in_bytes);
- for(int i = 0; i < 8; i++)
- {
- u1[i] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, i));
- }
-
- // transpose block1
- TRANSPOSE_4x4_func(u1[0][0], u1[1][0], u1[2][0], u1[3][0]);
- TRANSPOSE_4x4_func(u1[0][1], u1[1][1], u1[2][1], u1[3][1]);
- TRANSPOSE_4x4_func(u1[4][0], u1[5][0], u1[6][0], u1[7][0]);
- TRANSPOSE_4x4_func(u1[4][1], u1[5][1], u1[6][1], u1[7][1]);
- SWAP_4x4_func(u1[0][1], u1[1][1], u1[2][1], u1[3][1], u1[4][0], u1[5][0], u1[6][0], u1[7][0]);
-
- // write to block2
- SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(dst_iter, blk2_offset_in_bytes);
- for(int i = 0; i < 8; i++)
- {
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, i), u1[i]);
- }
-
- // load block2
- vec4 u2[8][2];
-
- SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(src_iter, blk2_offset_in_bytes);
- for(int i = 0; i < 8; i++)
- {
- u2[i] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, i));
- }
-
- // transpose block2
- TRANSPOSE_4x4_func(u2[0][0], u2[1][0], u2[2][0], u2[3][0]);
- TRANSPOSE_4x4_func(u2[0][1], u2[1][1], u2[2][1], u2[3][1]);
- TRANSPOSE_4x4_func(u2[4][0], u2[5][0], u2[6][0], u2[7][0]);
- TRANSPOSE_4x4_func(u2[4][1], u2[5][1], u2[6][1], u2[7][1]);
- SWAP_4x4_func(u2[0][1], u2[1][1], u2[2][1], u2[3][1], u2[4][0], u2[5][0], u2[6][0], u2[7][0]);
-
- // write to block1
- SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(dst_iter, blk1_offset_in_bytes);
- for(int i = 0; i < 8; i++)
- {
- STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, i), u2[i]);
- }
- }
-}
-
-#endif /* TRANSPOSE_4X4 */
-
-#endif /* DATA_TYPE_FP32 */
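All three variants above use the same blocked strategy: each work item transposes a small square block in registers, then writes it to the mirrored (y, x) block position in the destination. A scalar C sketch of one 4x4 block (the shaders additionally pack FP16 pairs into uints, and the SQUARE variant swaps the two mirrored blocks in-place):

    /* Sketch of a single 4x4 block transpose, in C. */
    #include <stdio.h>

    #define N 4

    void transpose_block_4x4(const float src[N][N], float dst[N][N])
    {
        for(int r = 0; r < N; r++)
            for(int c = 0; c < N; c++)
                dst[c][r] = src[r][c];
    }

    int main(void)
    {
        float a[N][N], b[N][N];
        for(int r = 0; r < N; r++)
            for(int c = 0; c < N; c++)
                a[r][c] = (float)(r * N + c);
        transpose_block_4x4(a, b);
        printf("%g %g\n", b[0][1], b[1][0]); /* 4 1 */
        return 0;
    }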