Diffstat (limited to 'src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl')
-rw-r--r-- | src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl | 372
1 file changed, 0 insertions(+), 372 deletions(-)
diff --git a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl b/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl
deleted file mode 100644
index 22ae098772..0000000000
--- a/src/core/CL/cl_kernels/common/experimental/gemm_fused_post_ops/act_eltwise_op_act/gemm_mm_native.cl
+++ /dev/null
@@ -1,372 +0,0 @@
-/*
- * Copyright (c) 2021-2022 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h"
-#include "common/experimental/gemm_fused_post_ops/fp_elementwise_op_helpers.h"
-#include "common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h"
-
-#include "gemm_helpers.h"
-#include "repeat.h"
-
-/** (EXPERIMENTAL_POST_OPS) gemm_mm_native kernel */
-#if defined(M0) && defined(N0) && defined(K0) && defined(DATA_TYPE) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
-#if defined(P2_ELTWISE_OP) && defined(P2_ELTWISE_ARG1_HEIGHT) && defined(P2_ELTWISE_ARG1_WIDTH)
-
-#define VFMA(a, b, c)     \
-    ({                    \
-        c = fma(a, b, c); \
-    })
-
-#if M0 == 1
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-    })
-#elif M0 == 2 // M0 == 2
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-    })
-#elif M0 == 3 // M0 == 3
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
-    })
-#elif M0 == 4 // M0 == 4
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
-    })
-#elif M0 == 5 // M0 == 5
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
-    })
-#elif M0 == 6 // M0 == 6
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
-    })
-#elif M0 == 7 // M0 == 7
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
-    })
-#elif M0 == 8 // M0 == 8
-#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
-    ({                                                                \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
-        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##7).s##i), b, (c##7)); \
-    })
-#else // M0 not supported
-#error "M0 not supported"
-#endif // M0 not supported
-
-#if defined(GEMM_MM_NATIVE_POST_ACT_ELTWISE_OP_ACT)
-/** This OpenCL kernel computes the matrix multiplication between 2 matrices plus 3 post ops:
- * Post op 1: activation (optional)
- * Post op 2: elementwise op
- * Post op 3: activation (optional)
- *
- * @note (Optional) -DP1_ACTIVATION_TYPE, -DP1_ACTIVATION_A_VAL, -DP1_ACTIVATION_B_VAL: The activation type, alpha and beta values of the activation post op at slot 1
- * @note (Required) -DP2_ELTWISE_OP: The (binary) elementwise post op to perform
- * @note (Required) -DP2_ELTWISE_ARG1_HEIGHT: The height (Y dimension) of the eltwise operand matrix of the eltwise post op at slot 2
- * @note (Required) -DP2_ELTWISE_ARG1_WIDTH: The width (X dimension) of the eltwise operand matrix of the eltwise post op at slot 2
- * @note (Optional) -DP3_ACTIVATION_TYPE, -DP3_ACTIVATION_A_VAL, -DP3_ACTIVATION_B_VAL: The activation type, alpha and beta values of the activation post op at slot 3
- *
- * All parameters are similarly defined in kernel gemm_mm_native, with these additions:
- *
- * @param[in] eltwise_operand_ptr      Pointer to the eltwise operand matrix. Supported data type: F16/F32
- * @param[in] eltwise_operand_stride_x Stride of the eltwise operand matrix in X dimension (in bytes)
- * @param[in] eltwise_operand_step_x   eltwise_operand_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] eltwise_operand_stride_y Stride of the eltwise operand matrix in Y dimension (in bytes)
- * @param[in] eltwise_operand_step_y   eltwise_operand_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] eltwise_operand_stride_z Stride of the eltwise operand tensor in Z dimension (in bytes)
- */
-__kernel void gemm_mm_native_post_act_eltwise_op_act(IMAGE_DECLARATION(lhs),
-                                                     IMAGE_DECLARATION(rhs),
-#if defined(BETA)
-                                                     IMAGE_DECLARATION(bias),
-#endif // defined(BETA)
-                                                     IMAGE_DECLARATION(dst),
-                                                     // Post Op arguments
-                                                     IMAGE_DECLARATION(eltwise_operand),
-                                                     uint lhs_stride_z,
-                                                     uint rhs_stride_z,
-#if defined(BETA)
-                                                     uint bias_stride_z,
-#endif //defined(BETA)
-                                                     uint dst_stride_z,
-                                                     uint eltwise_operand_stride_z,
-                                                     const int M,
-                                                     const int N,
-                                                     const int K
-#if defined(REINTERPRET_INPUT_AS_3D)
-                                                     ,
-                                                     uint lhs_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                     ,
-                                                     uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                    )
-{
-    // Block size
-#define RHS_BLOCK_SIZE ((K0) * (N0))
-
-    // RHS offset and step X
-#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
-
-    uint x = get_global_id(0);
-    uint y = get_global_id(1);
-    uint z = get_global_id(2);
-
-#if defined(DUMMY_WORK_ITEMS)
-    if((x * N0 >= N) || (y * M0 >= M))
-    {
-        return;
-    }
-#endif // defined(DUMMY_WORK_ITEMS)
-
-    // Compute LHS matrix address
-    uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
-
-    // Compute RHS matrix address
-    uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE);
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
-#else  // defined(MATRIX_B_DEPTH)
-    rhs_offset += z * rhs_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0);
-    REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
-    // The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
-    CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply lhs_stride_z by DEPTH_GEMM3D
-    lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
-    // Add offset for batched GEMM
-    lhs_offset += z * lhs_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-    // Initialize the accumulators
-    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
-
-    int i = 0;
-#if K0 > 1
-    for(; i <= (K - K0); i += K0)
-    {
-        // Supported cases (M0, K0):
-        // 1,2 - 1,3 - 1,4 - 1,8 - 1,16
-        // 2,2 - 2,3 - 2,4 - 2,8 - 2,16
-        // 3,2 - 3,3 - 3,4 - 3,8 - 3,16
-        // 4,2 - 4,3 - 4,4 - 4,8 - 4,16
-        // 5,2 - 5,3 - 5,4 - 5,8 - 5,16
-        // 6,2 - 6,3 - 6,4 - 6,8 - 6,16
-        // 7,2 - 7,3 - 7,4 - 7,8 - 7,16
-        // 8,2 - 8,3 - 8,4 - 8,8 - 8,16
-        // Load values from LHS matrix
-        LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
-
-        // Load values from RHS matrix
-        LOAD_BLOCK(K0, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zero);
-
-        RHS_VFMA_M0xN0(0, a, b0, c);
-        RHS_VFMA_M0xN0(1, a, b1, c);
-#if K0 > 2
-        RHS_VFMA_M0xN0(2, a, b2, c);
-#endif // K0 > 2
-#if K0 > 3
-        RHS_VFMA_M0xN0(3, a, b3, c);
-#endif // K0 > 3
-#if K0 > 4
-        RHS_VFMA_M0xN0(4, a, b4, c);
-        RHS_VFMA_M0xN0(5, a, b5, c);
-        RHS_VFMA_M0xN0(6, a, b6, c);
-        RHS_VFMA_M0xN0(7, a, b7, c);
-#endif // K0 > 4
-#if K0 > 8
-        RHS_VFMA_M0xN0(8, a, b8, c);
-        RHS_VFMA_M0xN0(9, a, b9, c);
-        RHS_VFMA_M0xN0(A, a, bA, c);
-        RHS_VFMA_M0xN0(B, a, bB, c);
-        RHS_VFMA_M0xN0(C, a, bC, c);
-        RHS_VFMA_M0xN0(D, a, bD, c);
-        RHS_VFMA_M0xN0(E, a, bE, c);
-        RHS_VFMA_M0xN0(F, a, bF, c);
-#endif // K0 > 8
-
-        lhs_offset += K0 * sizeof(DATA_TYPE);
-        rhs_offset += K0 * rhs_stride_y;
-    }
-#endif // K0 > 1
-    // Left-over accumulations
-    for(; i < K; ++i)
-    {
-        // Load values from LHS matrix
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a0 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zlhs0));
-#if M0 > 1
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a1 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zlhs1));
-#endif // M0 > 1
-#if M0 > 2
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a2 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zlhs2));
-#endif // M0 > 2
-#if M0 > 3
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a3 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zlhs3));
-#endif // M0 > 3
-#if M0 > 4
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a4 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zlhs4));
-#endif // M0 > 4
-#if M0 > 5
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a5 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zlhs5));
-#endif // M0 > 5
-#if M0 > 6
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a6 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zlhs6));
-#endif // M0 > 6
-#if M0 > 7
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a7 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zlhs7));
-#endif // M0 > 7
-
-        VEC_DATA_TYPE(DATA_TYPE, N0)
-        b = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 0 * rhs_stride_y));
-        RHS_VFMA_M0xN0(0, a, b, c);
-
-        lhs_offset += sizeof(DATA_TYPE);
-        rhs_offset += rhs_stride_y;
-    }
-
-    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
-
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
-    CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-#if defined(BROADCAST_BIAS)
-    __global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
-
-    LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
-#endif // UNIT_BETA
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(M0, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
-
-    LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
-#endif // UNIT_BETA
-
-    // c = c + bias
-    ADD_BLOCK(M0, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-    const bool cond_y = y == 0;
-    const bool cond_x = ((x + 1) * N0 >= N);
-
-    // c = act(c)
-    POST_OP1_ACTIVATION_OPTIONAL(M0, DATA_TYPE, DATA_TYPE_ACCUMULATOR, N0, c);
-    // c = c + eltwise_operand (mix-precision, broadcast, boundary aware)
-    POST_OP2_ELTWISE_OP(P2_ELTWISE_OP, M0, N0, c, eltwise_operand, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), DATA_TYPE, DATA_TYPE_ACCUMULATOR, zero, 1, PARTIAL_STORE_N0, false, cond_x);
-    // c = act(c)
-    POST_OP3_ACTIVATION_OPTIONAL(M0, DATA_TYPE, DATA_TYPE_ACCUMULATOR, N0, c);
-
-    // Store output block
-    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-#endif // defined(GEMM_MM_NATIVE_POST_ACT_ELTWISE_OP_ACT)
-#endif // defined(P2_ELTWISE_OP) && defined(P2_ELTWISE_ARG1_HEIGHT) && defined(P2_ELTWISE_ARG1_WIDTH)
-#endif // defined(M0) && defined(N0) && defined(K0) && defined(DATA_TYPE) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
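Note on the deleted kernel's inner loop: the RHS_VFMA_M0xN0 macros unroll the per-row FMA chain at compile time. As a minimal sketch (assuming, purely for illustration, M0 = 2, N0 = 4 and DATA_TYPE = float), a single call such as RHS_VFMA_M0xN0(0, a, b0, c) expands through VFMA to roughly:

    // Illustrative expansion for M0 == 2, N0 == 4, DATA_TYPE == float
    c0 = fma((float4)(a0.s0), b0, c0); // row 0: LHS scalar broadcast across the N0 lanes
    c1 = fma((float4)(a1.s0), b0, c1); // row 1: same RHS row, next LHS row

Each of the K0 calls in the main loop therefore accumulates one RHS row into all M0 accumulator rows, with the selected LHS element broadcast across the N0-wide vector.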
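The kernel only compiled when the tile sizes and post-op parameters were supplied as build-time defines (see the #if guards at the top of the file and the @note lines in the kernel documentation). A hedged host-side sketch using only the standard OpenCL API follows; the macro names come from the file itself, but the concrete values, the ADD elementwise op, and the program/device variables are illustrative assumptions, not the library's tuned choices:

    /* Illustrative build options only; tile sizes and the eltwise op value are assumptions. */
    const char *opts =
        "-DGEMM_MM_NATIVE_POST_ACT_ELTWISE_OP_ACT "
        "-DM0=4 -DN0=4 -DK0=4 "
        "-DDATA_TYPE=float -DDATA_TYPE_ACCUMULATOR=float "
        "-DPARTIAL_STORE_M0=1 -DPARTIAL_STORE_N0=1 "
        "-DP2_ELTWISE_OP=ADD -DP2_ELTWISE_ARG1_HEIGHT=24 -DP2_ELTWISE_ARG1_WIDTH=16";
    cl_int err = clBuildProgram(program, 1, &device, opts, NULL, NULL);

With such defines in place, the supported (M0, K0) combinations were exactly those listed in the comment inside the kernel's main K0 loop.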