From c9cecc0e565e7b4978cecc92e03e6c93bb8d0cb9 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Fri, 15 Oct 2021 10:23:24 +0100
Subject: Remove legacy GEMM kernels on OpenCL

Resolves COMPMID-4446

Change-Id: I1d3c2391b67681f4d3af440826aa95b47a1288a6
Signed-off-by: Gian Marco Iodice
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6444
Reviewed-by: Giorgio Arena
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 Android.bp                                          |    2 -
 SConscript                                          |    1 -
 arm_compute/runtime/CL/CLTypes.h                    |   10 -
 .../runtime/CL/functions/CLFullyConnectedLayer.h    |    2 +-
 filelist.json                                       |    1 -
 src/core/CL/cl_kernels/common/gemm.cl               |    7 +-
 src/core/CL/cl_kernels/common/gemm_v1.cl            | 3243 --------------------
 src/gpu/cl/ClKernelLibrary.cpp                      |   14 -
 src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp   |  538 ----
 src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h     |   88 -
 .../kernels/ClGemmMatrixMultiplyNativeKernel.cpp    |    2 +-
 .../cl/kernels/ClGemmMatrixMultiplyNativeKernel.h   |    2 +-
 .../native/ClGemmDefaultConfigNativeBifrost.cpp     |    2 +-
 src/gpu/cl/operators/ClFullyConnected.h             |    2 +-
 src/gpu/cl/operators/ClGemm.cpp                     |  219 +-
 src/gpu/cl/operators/ClGemm.h                       |   19 +-
 .../cl/operators/ClGemmLowpMatrixMultiplyCore.cpp   |    1 -
 src/runtime/CL/gemm/CLGEMMDefaultTypeBifrost.cpp    |   33 +-
 src/runtime/CL/gemm/CLGEMMDefaultTypeMidgard.cpp    |    4 +-
 src/runtime/CL/gemm/CLGEMMDefaultTypeValhall.cpp    |   10 +-
 tests/validate_examples/cl_gemm.cpp                 |    1 -
 tests/validation/CL/GEMMMatrixMultiply.cpp          |  339 --
 .../CL/GEMMMatrixMultiplyInterleavedTransposed.cpp  |  334 --
 utils/TypePrinter.h                                 |    8 -
 24 files changed, 89 insertions(+), 4793 deletions(-)
 delete mode 100644 src/core/CL/cl_kernels/common/gemm_v1.cl
 delete mode 100644 src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp
 delete mode 100644 src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h
 delete mode 100644 tests/validation/CL/GEMMMatrixMultiply.cpp
 delete mode 100644 tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp

diff --git a/Android.bp b/Android.bp
index ccfb2c707f..adcafa65d5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -34,7 +34,6 @@ opencl_srcs = [
     "src/core/CL/cl_kernels/common/floor.cl",
     "src/core/CL/cl_kernels/common/gather.cl",
     "src/core/CL/cl_kernels/common/gemm.cl",
-    "src/core/CL/cl_kernels/common/gemm_v1.cl",
     "src/core/CL/cl_kernels/common/gemmlowp.cl",
     "src/core/CL/cl_kernels/common/gemv.cl",
     "src/core/CL/cl_kernels/common/generate_proposals.cl",
@@ -529,7 +528,6 @@ cc_library_static {
        "src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp",
        "src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleKernel.cpp",
        "src/gpu/cl/kernels/ClGemmLowpReductionKernel.cpp",
-       "src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp",
        "src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp",
        "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp",
        "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp",
diff --git a/SConscript b/SConscript
index bcb93fde62..6c5839122a 100644
--- a/SConscript
+++ b/SConscript
@@ -308,7 +308,6 @@ if env['opencl'] and env['embed_kernels']:
                   'src/core/CL/cl_kernels/common/gather.cl',
                   'src/core/CL/cl_kernels/common/gemm.cl',
                   'src/core/CL/cl_kernels/common/gemv.cl',
-                  'src/core/CL/cl_kernels/common/gemm_v1.cl',
                   'src/core/CL/cl_kernels/common/gemmlowp.cl',
                   'src/core/CL/cl_kernels/common/generate_proposals.cl',
                   'src/core/CL/cl_kernels/common/generate_proposals_quantized.cl',
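With NATIVE_V1 and RESHAPED_V1 gone (see the CLTypes.h hunk below), CLGEMMKernelType is left with the three configurable variants. The following is an illustrative C++ sketch of the post-patch enum and a hypothetical selection heuristic; it is commentary, not code from this patch, and the real selection logic lives in the CLGEMMDefaultType* files changed further down.

// Sketch only: the enum values mirror the patched CLTypes.h; the heuristic
// and the helper name select_kernel_type() are hypothetical.
enum class CLGEMMKernelType
{
    NATIVE,           // native GEMM kernel with configurable block size
    RESHAPED,         // both lhs and rhs reshaped, configurable reshape/block size
    RESHAPED_ONLY_RHS // only rhs reshaped, configurable reshape/block size
};

CLGEMMKernelType select_kernel_type(unsigned int m, unsigned int n, unsigned int k)
{
    // Illustrative rule of thumb: shallow reductions favour the native kernel,
    // GEMV-like shapes favour reshaping only the rhs.
    if(k <= 16)
    {
        return CLGEMMKernelType::NATIVE;
    }
    return (m > 1) ? CLGEMMKernelType::RESHAPED : CLGEMMKernelType::RESHAPED_ONLY_RHS;
}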
diff --git a/arm_compute/runtime/CL/CLTypes.h b/arm_compute/runtime/CL/CLTypes.h
index cf0486c8c3..bba25c6d64 100644
--- a/arm_compute/runtime/CL/CLTypes.h
+++ b/arm_compute/runtime/CL/CLTypes.h
@@ -30,18 +30,8 @@ namespace arm_compute
 /** OpenCL GEMM kernel types */
 enum class CLGEMMKernelType
 {
-    /** Native GEMM kernel with fixed block size.
-     * @note Temporary variant to keep compatibility with the old implementation.
-     * @note This variant will be deprecated in favor of a new and configurable NATIVE variant
-     */
-    NATIVE_V1,
     /** Native GEMM kernel with configurable block size.*/
     NATIVE,
-    /** Reshaped GEMM kernel where both lhs and rhs matrices are reshaped. Fixed block size.
-     * @note Temporary variant to keep compatibility with the old implementation.
-     * @note This variant will be deprecated in favor of RESHAPED
-     */
-    RESHAPED_V1,
     /** Reshaped GEMM kernel where both lhs and rhs matrices are reshaped. Configurable reshape and block size */
     RESHAPED,
     /** Reshaped GEMM kernel where only the rhs matrix is reshaped. Configurable reshape and block size */
diff --git a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
index 9235a85d2c..2947b4890c 100644
--- a/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
+++ b/arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h
@@ -36,7 +36,7 @@ namespace arm_compute
  *
  *  -# @ref opencl::kernels::ClIm2ColKernel (called when the input comes from a convolutional layer)
  *  -# @ref CLTranspose (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once)
- *  -# @ref opencl::kernels::ClGemmMatrixMultiplyKernel or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
+ *  -# @ref opencl::ClGemm or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
  *
  * @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
  */
diff --git a/filelist.json b/filelist.json
index bcc7ecb37a..5a577b9160 100644
--- a/filelist.json
+++ b/filelist.json
@@ -476,7 +476,6 @@
         "src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp",
         "src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp",
         "src/gpu/cl/kernels/ClGemmLowpQuantizeDownInt32ScaleKernel.cpp",
-        "src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp",
         "src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp",
         "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.cpp",
         "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.cpp",
diff --git a/src/core/CL/cl_kernels/common/gemm.cl b/src/core/CL/cl_kernels/common/gemm.cl
index 87921f51fd..431c97becc 100644
--- a/src/core/CL/cl_kernels/common/gemm.cl
+++ b/src/core/CL/cl_kernels/common/gemm.cl
@@ -4141,6 +4141,7 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
     REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0)    c0=0,c1=0,c2=0,... c(M0-1)=0;
 
     int i = 0;
+#if K0 > 1
     for(; i <= (K - K0); i += K0)
     {
         // Supported cases (M0, K0):
@@ -4186,7 +4187,7 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
         lhs_offset += K0 * sizeof(DATA_TYPE);
         rhs_offset += K0 * rhs_stride_y;
     }
-
+#endif // K0 > 1
     // Left-over accumulations
     for(; i < K; ++i)
     {
@@ -4292,10 +4293,6 @@ __kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
 
     // Store output block
     STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-
-#undef RHS_BLOCK_SIZE
-#undef RHS_OFFSET_X
-#undef RHS_STEP_X
 }
 #endif // defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(DATA_TYPE)
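The only functional change in gemm.cl: the blocked K-loop is now compiled out when the K0 block size is 1, since the left-over loop alone then covers the whole reduction. A scalar C++ analogue of that structure (an illustrative sketch, not library code, assuming K0 is a compile-time constant):

template <int K0>
float blocked_dot(const float *lhs, const float *rhs, int K)
{
    float acc = 0.f;
    int   i   = 0;
    if constexpr(K0 > 1) // mirrors the new "#if K0 > 1 ... #endif" guard
    {
        for(; i <= K - K0; i += K0) // blocked accumulation, K0 elements per step
        {
            for(int j = 0; j < K0; ++j)
            {
                acc += lhs[i + j] * rhs[i + j];
            }
        }
    }
    for(; i < K; ++i) // left-over accumulations (the only loop when K0 == 1)
    {
        acc += lhs[i] * rhs[i];
    }
    return acc;
}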
diff --git a/src/core/CL/cl_kernels/common/gemm_v1.cl b/src/core/CL/cl_kernels/common/gemm_v1.cl
deleted file mode 100644
index a136a1b96b..0000000000
--- a/src/core/CL/cl_kernels/common/gemm_v1.cl
+++ /dev/null
@@ -1,3243 +0,0 @@
-/*
- * Copyright (c) 2020-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "gemm_helpers.h"
-#include "repeat.h"
-
-#if defined(M) && defined(N) && defined(K) && defined(H0) && defined(V0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) && defined(IN1_DIM_X)
-/** This OpenCL kernel is optimised for Midgard. It computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The number of columns of the reshaped rhs matrix must be passed at compile time using -DIN1_DIM_X
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value need to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type were passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), A, B variables, required by some activation functions, should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  cross_plane_pad                    (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f32(IMAGE_DECLARATION(src0),
-                                                 IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                 IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                 IMAGE_DECLARATION(dst),
-                                                 uint src0_stride_z,
-                                                 uint src1_stride_z,
-#if defined(BETA)
-                                                 uint src2_stride_z,
-#endif //defined(BETA)
-                                                 uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                 ,
-                                                 uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                )
-{
-    int x = get_global_id(0) / H0;
-    int y = get_global_id(1) / V0;
-    int z = get_global_id(2);
-
-    // Offset
-    const int offset_row_a = (get_global_id(1) % V0) * 4;
-    const int offset_row_b = (get_global_id(0) % H0) * 4;
-
-    // src_addr_a = address of matrix A
-    // src_addr_b = address of matrix B
-    int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
-    int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    __global float *src_addr_a = (__global float *)(src0_ptr + src0_addr_in_bytes);
-    __global float *src_addr_b = (__global float *)(src1_ptr + src1_addr_in_bytes);
-
-    // Compute end row address for matrix B
-    __global float *src_end_addr_b = src_addr_b + IN1_DIM_X;
-
-    src_addr_a += offset_row_a;
-    src_addr_b += offset_row_b;
-
-    // Reset accumulators
-    float4 c0 = 0.0f;
-    float4 c1 = 0.0f;
-    float4 c2 = 0.0f;
-    float4 c3 = 0.0f;
-
-    for(; src_addr_b <= (src_end_addr_b - (int)(8 * H0)); src_addr_a += 8 * V0, src_addr_b += 8 * H0)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        float4 a0 = vload4(0, src_addr_a);
-        float4 b0 = vload4(0, src_addr_b);
-
-        c0 += (float4)a0.s0 * b0;
-        c1 += (float4)a0.s1 * b0;
-        c2 += (float4)a0.s2 * b0;
-        c3 += (float4)a0.s3 * b0;
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a + 4 * V0);
-        b0 = vload4(0, src_addr_b + 4 * H0);
-
-        c0 += (float4)a0.s0 * b0;
-        c1 += (float4)a0.s1 * b0;
-        c2 += (float4)a0.s2 * b0;
-        c3 += (float4)a0.s3 * b0;
-    }
-
-    for(; src_addr_b < src_end_addr_b; src_addr_a += 4 * V0, src_addr_b += 4 * H0)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        float4 a0 = vload4(0, src_addr_a);
-        float4 b0 = vload4(0, src_addr_b);
-
-        c0 += (float4)a0.s0 * b0;
-        c1 += (float4)a0.s1 * b0;
-        c2 += (float4)a0.s2 * b0;
-        c3 += (float4)a0.s3 * b0;
-    }
-
-    // Compute destination address
-    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-    // Compute dst address
-    __global uchar *dst_addr = offset(&dst, 0, 0);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(4, float, c, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float));
-
-    LOAD_BLOCK(1, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
-                                    2) * src2_stride_z;
-
-    LOAD_BLOCK(4, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(4, float, bias, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias
-    ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(4, ACTIVATION_TYPE, float, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store 4x4 block
-    const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
-    const bool cond_x = ((get_global_id(0) + 1) * 4 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(4, 4, float, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
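For context on the addressing used by the deleted kernel above: the lhs is stored in 4-row interleaved blocks (factor V0) and the rhs in transposed 4-column blocks (factor H0), which is why the pointers advance by 4 * V0 and 4 * H0 per K-step. A hypothetical C++ sketch of the implied element indexing, under the stated 4x4-block assumption (illustrative, not library code):

struct ReshapedIndex
{
    int lhs; // offset into the interleaved lhs buffer
    int rhs; // offset into the transposed rhs buffer
};

ReshapedIndex index_at(int k_step, int row_block, int col_block, int V0, int H0)
{
    // Within one interleave group, V0 row-blocks of 4 values alternate, so
    // consecutive k-steps of the same row-block sit 4 * V0 apart (ditto H0
    // for the transposed rhs), matching offset_row_a/offset_row_b above.
    ReshapedIndex idx;
    idx.lhs = (row_block % V0) * 4 + k_step * 4 * V0;
    idx.rhs = (col_block % H0) * 4 + k_step * 4 * H0;
    return idx;
}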
-
-/** This OpenCL kernel is optimized for Bifrost and it computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value need to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type were passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), A, B variables, required by some activation functions, should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  cross_plane_pad                    (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f32_bifrost(IMAGE_DECLARATION(src0),
-                                                         IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                         IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                         IMAGE_DECLARATION(dst),
-                                                         uint src0_stride_z,
-                                                         uint src1_stride_z,
-#if defined(BETA)
-                                                         uint src2_stride_z,
-#endif //defined(BETA)
-                                                         uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                         ,
-                                                         uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                        )
-{
-    int x = get_global_id(0) / H0;
-    int y = get_global_id(1) / V0;
-    int z = get_global_id(2);
-
-    // Offset
-    const int offset_row_a = (get_global_id(1) % V0) * 4;
-    const int offset_row_b = (get_global_id(0) % H0) * 4;
-
-    // src_addr_a = address of matrix A
-    // src_addr_b = address of matrix B
-    int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
-    int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    __global float *src_addr_a = (__global float *)(src0_ptr + src0_addr_in_bytes);
-    __global float *src_addr_b = (__global float *)(src1_ptr + src1_addr_in_bytes);
-
-    src_addr_a += offset_row_a;
-    src_addr_b += offset_row_b;
-
-    // Reset accumulators
-    float4 c0 = 0.0f;
-    float4 c1 = 0.0f;
-    float4 c2 = 0.0f;
-    float4 c3 = 0.0f;
-
-    int i = 0;
-    for(; i <= (int)(K - 4); i += 4)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        float4 a0 = vload4(0, src_addr_a);
-        float4 b0 = vload4(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 4 * H0;
-
-        c0.s0 = fma(a0.s0, b0.s0, c0.s0);
-        c0.s1 = fma(a0.s0, b0.s1, c0.s1);
-        c0.s2 = fma(a0.s0, b0.s2, c0.s2);
-        c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
-        c1.s0 = fma(a0.s1, b0.s0, c1.s0);
-        c1.s1 = fma(a0.s1, b0.s1, c1.s1);
-        c1.s2 = fma(a0.s1, b0.s2, c1.s2);
-        c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
-        c2.s0 = fma(a0.s2, b0.s0, c2.s0);
-        c2.s1 = fma(a0.s2, b0.s1, c2.s1);
-        c2.s2 = fma(a0.s2, b0.s2, c2.s2);
-        c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
-        c3.s0 = fma(a0.s3, b0.s0, c3.s0);
-        c3.s1 = fma(a0.s3, b0.s1, c3.s1);
-        c3.s2 = fma(a0.s3, b0.s2, c3.s2);
-        c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a);
-        b0 = vload4(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 4 * H0;
-
-        c0.s0 = fma(a0.s0, b0.s0, c0.s0);
-        c0.s1 = fma(a0.s0, b0.s1, c0.s1);
-        c0.s2 = fma(a0.s0, b0.s2, c0.s2);
-        c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
-        c1.s0 = fma(a0.s1, b0.s0, c1.s0);
-        c1.s1 = fma(a0.s1, b0.s1, c1.s1);
-        c1.s2 = fma(a0.s1, b0.s2, c1.s2);
-        c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
-        c2.s0 = fma(a0.s2, b0.s0, c2.s0);
-        c2.s1 = fma(a0.s2, b0.s1, c2.s1);
-        c2.s2 = fma(a0.s2, b0.s2, c2.s2);
-        c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
-        c3.s0 = fma(a0.s3, b0.s0, c3.s0);
-        c3.s1 = fma(a0.s3, b0.s1, c3.s1);
-        c3.s2 = fma(a0.s3, b0.s2, c3.s2);
-        c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a);
-        b0 = vload4(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 4 * H0;
-
-        c0.s0 = fma(a0.s0, b0.s0, c0.s0);
-        c0.s1 = fma(a0.s0, b0.s1, c0.s1);
-        c0.s2 = fma(a0.s0, b0.s2, c0.s2);
-        c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
-        c1.s0 = fma(a0.s1, b0.s0, c1.s0);
-        c1.s1 = fma(a0.s1, b0.s1, c1.s1);
-        c1.s2 = fma(a0.s1, b0.s2, c1.s2);
-        c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
-        c2.s0 = fma(a0.s2, b0.s0, c2.s0);
-        c2.s1 = fma(a0.s2, b0.s1, c2.s1);
-        c2.s2 = fma(a0.s2, b0.s2, c2.s2);
-        c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
-        c3.s0 = fma(a0.s3, b0.s0, c3.s0);
-        c3.s1 = fma(a0.s3, b0.s1, c3.s1);
-        c3.s2 = fma(a0.s3, b0.s2, c3.s2);
-        c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a);
-        b0 = vload4(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 4 * H0;
-
-        c0.s0 = fma(a0.s0, b0.s0, c0.s0);
-        c0.s1 = fma(a0.s0, b0.s1, c0.s1);
-        c0.s2 = fma(a0.s0, b0.s2, c0.s2);
-        c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
-        c1.s0 = fma(a0.s1, b0.s0, c1.s0);
-        c1.s1 = fma(a0.s1, b0.s1, c1.s1);
-        c1.s2 = fma(a0.s1, b0.s2, c1.s2);
-        c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
-        c2.s0 = fma(a0.s2, b0.s0, c2.s0);
-        c2.s1 = fma(a0.s2, b0.s1, c2.s1);
-        c2.s2 = fma(a0.s2, b0.s2, c2.s2);
-        c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
-        c3.s0 = fma(a0.s3, b0.s0, c3.s0);
-        c3.s1 = fma(a0.s3, b0.s1, c3.s1);
-        c3.s2 = fma(a0.s3, b0.s2, c3.s2);
-        c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-    }
-
-    for(; i < (int)K; ++i)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        float4 a0 = vload4(0, src_addr_a);
-        float4 b0 = vload4(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 4 * H0;
-
-        c0.s0 = fma(a0.s0, b0.s0, c0.s0);
-        c0.s1 = fma(a0.s0, b0.s1, c0.s1);
-        c0.s2 = fma(a0.s0, b0.s2, c0.s2);
-        c0.s3 = fma(a0.s0, b0.s3, c0.s3);
-
-        c1.s0 = fma(a0.s1, b0.s0, c1.s0);
-        c1.s1 = fma(a0.s1, b0.s1, c1.s1);
-        c1.s2 = fma(a0.s1, b0.s2, c1.s2);
-        c1.s3 = fma(a0.s1, b0.s3, c1.s3);
-
-        c2.s0 = fma(a0.s2, b0.s0, c2.s0);
-        c2.s1 = fma(a0.s2, b0.s1, c2.s1);
-        c2.s2 = fma(a0.s2, b0.s2, c2.s2);
-        c2.s3 = fma(a0.s2, b0.s3, c2.s3);
-
-        c3.s0 = fma(a0.s3, b0.s0, c3.s0);
-        c3.s1 = fma(a0.s3, b0.s1, c3.s1);
-        c3.s2 = fma(a0.s3, b0.s2, c3.s2);
-        c3.s3 = fma(a0.s3, b0.s3, c3.s3);
-    }
-
-    // Compute destination address
-    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-    // Compute dst address
-    __global uchar *dst_addr = offset(&dst, 0, 0);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(4, float, c, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float));
-
-    LOAD_BLOCK(1, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
-                                    2) * src2_stride_z;
-
-    LOAD_BLOCK(4, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(4, float, bias, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias
-    ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(4, ACTIVATION_TYPE, float, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store 4x4 block
-    const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
-    const bool cond_x = ((get_global_id(0) + 1) * 4 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(4, 4, float, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
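The Bifrost variant deleted above spells out the 4x4 rank-1 update with explicit fma() calls instead of the vector "c += a * b" form used by the Midgard kernel, to map onto the fused multiply-add pipes. A C++ analogue of one K-step (illustrative sketch only):

#include <cmath>

void rank1_update_4x4(const float a[4], const float b[4], float c[4][4])
{
    for(int r = 0; r < 4; ++r)
    {
        for(int col = 0; col < 4; ++col)
        {
            // One fused multiply-add per output element, as in the OpenCL code
            c[r][col] = std::fma(a[r], b[col], c[r][col]);
        }
    }
}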
-
-#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-/** This OpenCL kernel computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The number of columns of the reshaped rhs matrix must be passed at compile time using -DIN1_DIM_X
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value need to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type were passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), A, B variables, required by some activation functions, should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F16
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  cross_plane_pad                    (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f16(IMAGE_DECLARATION(src0),
-                                                 IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                 IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                 IMAGE_DECLARATION(dst),
-                                                 uint src0_stride_z,
-                                                 uint src1_stride_z,
-#if defined(BETA)
-                                                 uint src2_stride_z,
-#endif //defined(BETA)
-                                                 uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                 ,
-                                                 uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                )
-{
-    int x = get_global_id(0) / H0;
-    int y = get_global_id(1) / V0;
-    int z = get_global_id(2);
-
-    // Offset
-    const int offset_row_a = (get_global_id(1) % V0) * 4;
-    const int offset_row_b = (get_global_id(0) % H0) * 8;
-
-    // src_addr_a = address of matrix A
-    // src_addr_b = address of matrix B
-    int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
-    int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    __global half *src_addr_a = (__global half *)(src0_ptr + src0_addr_in_bytes);
-    __global half *src_addr_b = (__global half *)(src1_ptr + src1_addr_in_bytes);
-
-    // Compute end row address for matrix B
-    __global half *src_end_addr_b = src_addr_b + IN1_DIM_X;
-
-    src_addr_a += offset_row_a;
-    src_addr_b += offset_row_b;
-
-    // Reset accumulators
-    half8 c0 = 0.0f;
-    half8 c1 = 0.0f;
-    half8 c2 = 0.0f;
-    half8 c3 = 0.0f;
-
-    for(; src_addr_b <= (src_end_addr_b - (int)(16 * H0)); src_addr_a += 8 * V0, src_addr_b += 16 * H0)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        half4 a0 = vload4(0, src_addr_a);
-        half8 b0 = vload8(0, src_addr_b);
-
-        c0 += (half8)a0.s0 * b0;
-        c1 += (half8)a0.s1 * b0;
-        c2 += (half8)a0.s2 * b0;
-        c3 += (half8)a0.s3 * b0;
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a + 4 * V0);
-        b0 = vload8(0, src_addr_b + 8 * H0);
-
-        c0 += (half8)a0.s0 * b0;
-        c1 += (half8)a0.s1 * b0;
-        c2 += (half8)a0.s2 * b0;
-        c3 += (half8)a0.s3 * b0;
-    }
-
-    for(; src_addr_b < src_end_addr_b; src_addr_a += 4 * V0, src_addr_b += 8 * H0)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        half4 a0 = vload4(0, src_addr_a);
-        half8 b0 = vload8(0, src_addr_b);
-
-        c0 += (half8)a0.s0 * b0;
-        c1 += (half8)a0.s1 * b0;
-        c2 += (half8)a0.s2 * b0;
-        c3 += (half8)a0.s3 * b0;
-    }
-
-    // Compute destination address
-    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-    // Compute dst address
-    __global uchar *dst_addr = offset(&dst, 0, 0);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(4, half, c, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
-    LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, half, bias, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
-                                    2) * src2_stride_z;
-
-    LOAD_BLOCK(4, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(4, half, bias, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias
-    ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(4, ACTIVATION_TYPE, half, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store 4x8 block
-    const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
-    const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(4, 8, half, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
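The REINTERPRET_OUTPUT_AS_3D epilogue repeated in these kernels maps each of the 4 rows of the output tile to its 2D plane and adds the bottom padding inserted between planes. A scalar C++ analogue of the zout computation, with the uint4 unrolled into a loop (illustrative sketch only):

#include <algorithm>

void compute_zout(unsigned gid_y, unsigned height_gemm3d, unsigned depth_gemm3d,
                  unsigned cross_plane_pad, unsigned dst_stride_y, unsigned zout[4])
{
    for(unsigned r = 0; r < 4; ++r)
    {
        // Plane index of output row (gid_y * 4 + r), clamped to the last plane
        unsigned plane = std::min((gid_y * 4 + r) / height_gemm3d, depth_gemm3d - 1);
        // Each crossed plane boundary contributes cross_plane_pad padded rows
        zout[r] = plane * cross_plane_pad * dst_stride_y;
    }
}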
-
-/** This OpenCL kernel computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1) while accumulating the result in a 32-bit floating point variable.
- *
- * @note The number of rows of destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The number of columns of the reshaped rhs matrix must be passed at compile time using -DIN1_DIM_X
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha's value need to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type were passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), A, B variables, required by some activation functions, should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F16
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  cross_plane_pad                    (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f16_acc32(IMAGE_DECLARATION(src0),
-                                                       IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                       IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                       IMAGE_DECLARATION(dst),
-                                                       uint src0_stride_z,
-                                                       uint src1_stride_z,
-#if defined(BETA)
-                                                       uint src2_stride_z,
-#endif //defined(BETA)
-                                                       uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                       ,
-                                                       uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                      )
-{
-    int x = get_global_id(0) / H0;
-    int y = get_global_id(1) / V0;
-    int z = get_global_id(2);
-
-    // Offset
-    const int offset_row_a = (get_global_id(1) % V0) * 4;
-    const int offset_row_b = (get_global_id(0) % H0) * 8;
-
-    // src_addr_a = address of matrix A
-    // src_addr_b = address of matrix B
-    int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
-    int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    __global half *src_addr_a = (__global half *)(src0_ptr + src0_addr_in_bytes);
-    __global half *src_addr_b = (__global half *)(src1_ptr + src1_addr_in_bytes);
-
-    // Compute end row address for matrix B
-    __global half *src_end_addr_b = src_addr_b + IN1_DIM_X;
-
-    src_addr_a += offset_row_a;
-    src_addr_b += offset_row_b;
-
-    // Reset accumulators
-    float8 c0 = 0.0f;
-    float8 c1 = 0.0f;
-    float8 c2 = 0.0f;
-    float8 c3 = 0.0f;
-
-    for(; src_addr_b <= (src_end_addr_b - (int)(16 * H0)); src_addr_a += 8 * V0, src_addr_b += 16 * H0)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        float4 a0 = convert_float4(vload4(0, src_addr_a));
-        float8 b0 = convert_float8(vload8(0, src_addr_b));
-
-        c0 += (float8)a0.s0 * b0;
-        c1 += (float8)a0.s1 * b0;
-        c2 += (float8)a0.s2 * b0;
-        c3 += (float8)a0.s3 * b0;
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = convert_float4(vload4(0, src_addr_a + 4 * V0));
-        b0 = convert_float8(vload8(0, src_addr_b + 8 * H0));
-
-        c0 += (float8)a0.s0 * b0;
-        c1 += (float8)a0.s1 * b0;
-        c2 += (float8)a0.s2 * b0;
-        c3 += (float8)a0.s3 * b0;
-    }
-
-    for(; src_addr_b < src_end_addr_b; src_addr_a += 4 * V0, src_addr_b += 8 * H0)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        float4 a0 = convert_float4(vload4(0, src_addr_a));
-        float8 b0 = convert_float8(vload8(0, src_addr_b));
-
-        c0 += (float8)a0.s0 * b0;
-        c1 += (float8)a0.s1 * b0;
-        c2 += (float8)a0.s2 * b0;
-        c3 += (float8)a0.s3 * b0;
-    }
-
-    // Compute destination address
-    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-    // Compute dst address
-    __global uchar *dst_addr = offset(&dst, 0, 0);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(4, float, c, ALPHA);
-#endif // defined(ALPHA)
-
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
-    LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-    float8 bias_f0 = convert_float8(bias0);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, float, bias_f, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(4, c, bias_f0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(
-                                    2) * src2_stride_z;
-
-    LOAD_BLOCK(4, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-    float8 bias_f0 = convert_float8(bias0);
-    float8 bias_f1 = convert_float8(bias1);
-    float8 bias_f2 = convert_float8(bias2);
-    float8 bias_f3 = convert_float8(bias3);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(4, float, bias_f, BETA);
-#endif // UNIT_BIAS
-
-    // c = c + bias
-    ADD_BLOCK(4, c, bias_f);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-    half8 c_h0 = convert_half8(c0);
-    half8 c_h1 = convert_half8(c1);
-    half8 c_h2 = convert_half8(c2);
-    half8 c_h3 = convert_half8(c3);
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(4, ACTIVATION_TYPE, half, VEC_SIZE, c_h, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store 4x8 block
-    const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
-    const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(4, 8, half, c_h, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
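The _acc32 variant deleted above loads half data, accumulates in float, and converts back to half only for the activation and the final store, trading bandwidth for accumulation accuracy. A minimal C++ analogue (illustrative sketch; the to_f32 conversion callback is hypothetical, standing in for OpenCL's convert_float8):

#include <cstdint>

float dot_acc32(const uint16_t *a_f16, const uint16_t *b_f16, int K,
                float (*to_f32)(uint16_t))
{
    float acc = 0.f; // 32-bit accumulator despite 16-bit inputs
    for(int i = 0; i < K; ++i)
    {
        // Convert on load, as convert_float4/convert_float8 do in the kernel
        acc += to_f32(a_f16[i]) * to_f32(b_f16[i]);
    }
    return acc; // the caller converts back to f16 for the store
}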
-/** This OpenCL kernel optimized for Bifrost architectures computes the matrix multiplication between matrix A reshaped (src0) and matrix B reshaped (src1)
- *
- * @note The number of rows of the destination matrix must be passed at compile time using -DM
- * @note The number of columns of the destination matrix must be passed at compile time using -DN
- * @note The number of rows of the *un-reshaped* matrix B (K) must be passed at compile time using -DK
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note The multiplication factor for the transposition width (H0) must be passed at compile time using -DH0 (e.g. -DH0=2)
- * @note The multiplication factor for the height of the 4x4 interleaved block must be passed at compile time using -DV0 (e.g. -DV0=2)
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F16
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination matrix in Z dimension (in bytes)
- * @param[in]  cross_plane_pad                    (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_interleaved_transposed_f16_bifrost(IMAGE_DECLARATION(src0),
-                                                         IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                         IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                         IMAGE_DECLARATION(dst),
-                                                         uint src0_stride_z,
-                                                         uint src1_stride_z,
-#if defined(BETA)
-                                                         uint src2_stride_z,
-#endif //defined(BETA)
-                                                         uint dst_stride_z
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                         ,
-                                                         uint cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                        )
-{
-    int x = get_global_id(0) / H0;
-    int y = get_global_id(1) / V0;
-    int z = get_global_id(2);
-
-    // Offset
-    const int offset_row_a = (get_global_id(1) % V0) * 4;
-    const int offset_row_b = (get_global_id(0) % H0) * 8;
-
-    // src_addr_a = address of matrix A
-    // src_addr_b = address of matrix B
-    int src0_addr_in_bytes = z * src0_stride_z + y * src0_stride_y + src0_offset_first_element_in_bytes;
-    int src1_addr_in_bytes = x * src1_stride_y + src1_offset_first_element_in_bytes;
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src1_addr_in_bytes += (z % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src1_addr_in_bytes += z * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    __global half *src_addr_a = (__global half *)(src0_ptr + src0_addr_in_bytes);
-    __global half *src_addr_b = (__global half *)(src1_ptr + src1_addr_in_bytes);
-
-    src_addr_a += offset_row_a;
-    src_addr_b += offset_row_b;
-
-    // Reset accumulators
-    half8 c0 = 0.0f;
-    half8 c1 = 0.0f;
-    half8 c2 = 0.0f;
-    half8 c3 = 0.0f;
-
-    int i = 0;
-    for(; i <= (int)(K - 4); i += 4)
-    {
-#if V0 == 1
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        half8 a0 = vload8(0, src_addr_a);
-        half8 b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 8 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-
-        // Load values from matrix B (transposed)
-        b0 = vload8(0, src_addr_b);
-
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s4, b0, c0);
-        c1 = fma((half8)a0.s5, b0, c1);
-        c2 = fma((half8)a0.s6, b0, c2);
-        c3 = fma((half8)a0.s7, b0, c3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload8(0, src_addr_a);
-        b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 8 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-
-        // Load values from matrix B (transposed)
-        b0 = vload8(0, src_addr_b);
-
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s4, b0, c0);
-        c1 = fma((half8)a0.s5, b0, c1);
-        c2 = fma((half8)a0.s6, b0, c2);
-        c3 = fma((half8)a0.s7, b0, c3);
-#else // V0 == 1
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        half4 a0 = vload4(0, src_addr_a);
-        half8 b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a);
-        b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a);
-        b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        a0 = vload4(0, src_addr_a);
-        b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-#endif // V0 == 1
-    }
-
-    for(; i < (int)K; ++i)
-    {
-        // Load values from matrix A (interleaved) and matrix B (transposed)
-        half4 a0 = vload4(0, src_addr_a);
-        half8 b0 = vload8(0, src_addr_b);
-
-        src_addr_a += 4 * V0;
-        src_addr_b += 8 * H0;
-
-        c0 = fma((half8)a0.s0, b0, c0);
-        c1 = fma((half8)a0.s1, b0, c1);
-        c2 = fma((half8)a0.s2, b0, c2);
-        c3 = fma((half8)a0.s3, b0, c3);
-    }
-
-    // Compute destination address
-    Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
-
-    // Compute dst address
-    __global uchar *dst_addr = offset(&dst, 0, 0);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing M (get_global_id(1) * 4) by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(get_global_id(1) * 4)) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(4, half, c, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(4, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
-    LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, half, bias, BETA);
-#endif // UNIT_BETA
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(4, c, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (get_global_id(1) * (uint)4 * src2_stride_y) + get_global_id(2) * src2_stride_z;
-
-    LOAD_BLOCK(4, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(4, half, bias, BETA);
-#endif // UNIT_BETA
-
-    // c = c + bias
-    ADD_BLOCK(4, c, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(4, ACTIVATION_TYPE, half, VEC_SIZE, c, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store 4x8 block
-    const bool cond_y = ((get_global_id(1) + 1) * 4 >= M);
-    const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(4, 8, half, c, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
-#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-
-#endif // defined(M) && defined(N) && defined(K) && defined(H0) && defined(V0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) && defined(IN1_DIM_X)
-
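Editorial note on the reshaped layout consumed above: the LHS arrives as 4x4 interleaved row-blocks (V0 of them interleaved) and the RHS as 1x8 transposed column-blocks (H0 of them), which is what the offset_row_a/offset_row_b arithmetic walks. A hypothetical, simplified sketch of the LHS interleave follows (it ignores the V0 block-interleaving factor for clarity and is not one of the library's reshape kernels):

#pragma OPENCL EXTENSION cl_khr_fp16 : enable

__kernel void interleave_lhs_4x4_sketch(__global const half *src, // M x K, row-major (assumed)
                                        __global half *dst,       // reshaped output
                                        int K)                    // row length of the source
{
    const int k  = get_global_id(0); // source column
    const int y4 = get_global_id(1); // which block of 4 consecutive rows
    // The 4 rows of a block are stored next to each other, column by column,
    // so the compute kernel can load one column of the block with a single vload4.
    for(int r = 0; r < 4; ++r)
    {
        dst[(y4 * K + k) * 4 + r] = src[(y4 * 4 + r) * K + k];
    }
}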
-#if defined(N) && defined(K) && defined(M0) && defined(N0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
-#if defined(DATA_TYPE)
-#define VECTOR_TYPE VEC_DATA_TYPE(DATA_TYPE, N0)
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped.
- *
- * @note This OpenCL kernel works with floating point data types (F16/F32)
- * @note The floating point data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the input or output has to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F16/F32
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  src_cross_plane_pad                (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in]  dst_cross_plane_pad                (Optional) Bottom paddings in unit of elements for the output tensor (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point(IMAGE_DECLARATION(src0),
-                                     IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                     IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                     IMAGE_DECLARATION(dst),
-                                     uint src0_stride_z,
-                                     uint src1_stride_z,
-#if defined(BETA)
-                                     uint src2_stride_z,
-#endif //defined(BETA)
-                                     uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
-                                     ,
-                                     uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                     ,
-                                     uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                    )
-{
-    int idx = get_global_id(0) * N0;
-
-    // Compute starting address for matrix A and Matrix B
-    int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
-    // Update address for the matrix A
-    src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
-    // Update address for the matrix B
-    src_addr.s1 += idx * sizeof(DATA_TYPE);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
-    // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D
-    uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
-    zin = min(DEPTH_GEMM3D - 1, zin);
-
-    // Add offset due to the cross plane paddings
-    zin *= (src_cross_plane_pad * src0_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply src0_stride_z by DEPTH_GEMM3D
-    src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
-    // Add offset for batched GEMM
-    src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    int end_row_vec_a = src_addr.s0 + (K * sizeof(DATA_TYPE));
-
-    VECTOR_TYPE acc0 = 0.0f;
-#if M0 > 1
-    VECTOR_TYPE acc1 = 0.0f;
-#endif // M0 > 1
-#if M0 > 2
-    VECTOR_TYPE acc2 = 0.0f;
-#endif // M0 > 2
-#if M0 > 3
-    VECTOR_TYPE acc3 = 0.0f;
-#endif // M0 > 3
-
-    for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(DATA_TYPE)); src_addr += (int2)(2 * sizeof(DATA_TYPE), 2 * src1_stride_y))
-    {
-#if defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        LOAD_BLOCK(M0, 2, DATA_TYPE, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s);
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a0 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a1 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a2 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
-        VEC_DATA_TYPE(DATA_TYPE, 2)
-        a3 = vload2(0, (__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-        // Load values from matrix B
-        VECTOR_TYPE b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1));
-        VECTOR_TYPE b1 = VLOAD(N0)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1 + src1_stride_y));
-
-        // Accumulate
-        acc0 += b0 * (VECTOR_TYPE)a0.s0;
-        acc0 += b1 * (VECTOR_TYPE)a0.s1;
-#if M0 > 1
-        acc1 += b0 * (VECTOR_TYPE)a1.s0;
-        acc1 += b1 * (VECTOR_TYPE)a1.s1;
-#endif // M0 > 1
-#if M0 > 2
-        acc2 += b0 * (VECTOR_TYPE)a2.s0;
-        acc2 += b1 * (VECTOR_TYPE)a2.s1;
-#endif // M0 > 2
-#if M0 > 3
-        acc3 += b0 * (VECTOR_TYPE)a3.s0;
-        acc3 += b1 * (VECTOR_TYPE)a3.s1;
-#endif // M0 > 3
-    }
-
-    for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(DATA_TYPE), src1_stride_y))
-    {
-#if defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        DATA_TYPE a0 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
-        DATA_TYPE a1 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
-        DATA_TYPE a2 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
-        DATA_TYPE a3 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        DATA_TYPE a0 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
-        DATA_TYPE a1 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
-        DATA_TYPE a2 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
-        DATA_TYPE a3 = *((__global DATA_TYPE *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-        // Load values from matrix B
-        VECTOR_TYPE b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(src1_ptr + src_addr.s1));
-
-        // Accumulate
-        acc0 += b0 * (VECTOR_TYPE)a0;
-#if M0 > 1
-        acc1 += b0 * (VECTOR_TYPE)a1;
-#endif // M0 > 1
-#if M0 > 2
-        acc2 += b0 * (VECTOR_TYPE)a2;
-#endif // M0 > 2
-#if M0 > 3
-        acc3 += b0 * (VECTOR_TYPE)a3;
-#endif // M0 > 3
-    }
-
-    int z = get_global_id(2);
-
-    // Compute dst address
-    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (dst_cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(M0, DATA_TYPE, acc, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
-
-    LOAD_BLOCK(1, N0, DATA_TYPE, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
-#endif // UNIT_BETA
-
-    // c = c + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src2_stride_y) + z * src2_stride_z;
-
-    LOAD_BLOCK(M0, N0, DATA_TYPE, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
-#endif // UNIT_BETA
-
-    // c = c + bias
-    ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store output block
-    const bool cond_y = get_global_id(1) == 0;
-    const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-#endif // defined(DATA_TYPE)
-
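Editorial note to make the addressing in gemm_mm_floating_point easier to follow: here is a hypothetical scalar reduction of it (M0 = N0 = 1, no 3D reinterpretation, no alpha/beta epilogue; the kernel name and signature are illustrative, not library API), showing how src_addr.s0 walks a row of A in element steps while src_addr.s1 steps B down one Y stride per k iteration:

__kernel void gemm_mm_scalar_sketch(__global const uchar *src0, uint src0_stride_y,
                                    __global const uchar *src1, uint src1_stride_y,
                                    __global uchar *dst, uint dst_stride_y,
                                    int K)
{
    const int x = get_global_id(0); // output column
    const int y = get_global_id(1); // output row
    float acc = 0.0f;
    for(int k = 0; k < K; ++k)
    {
        // A: contiguous along its row; B: one row (Y stride, in bytes) down per k
        const float a = *((__global const float *)(src0 + y * src0_stride_y) + k);
        const float b = *((__global const float *)(src1 + k * src1_stride_y) + x);
        acc = fma(a, b, acc);
    }
    *((__global float *)(dst + y * dst_stride_y) + x) = acc;
}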
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 32-bit floating point data type (float) and uses the fma units.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=4.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the input or output has to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  src_cross_plane_pad                (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in]  dst_cross_plane_pad                (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point_f32_bifrost(IMAGE_DECLARATION(src0),
-                                                 IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                 IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                 IMAGE_DECLARATION(dst),
-                                                 uint src0_stride_z,
-                                                 uint src1_stride_z,
-#if defined(BETA)
-                                                 uint src2_stride_z,
-#endif //defined(BETA)
-                                                 uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
-                                                 ,
-                                                 uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                 ,
-                                                 uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                )
-{
-    int idx = get_global_id(0) * N0;
-
-    // Compute starting address for matrix A and matrix B
-    int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
-    // Update address for matrix A
-    src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
-    // Update address for matrix B
-    src_addr.s1 += idx * sizeof(float);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
-    // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D
-    uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
-    zin = min(DEPTH_GEMM3D - 1, zin);
-
-    // Add offset due to the cross plane paddings
-    zin *= (src_cross_plane_pad * src0_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply src0_stride_z by DEPTH_GEMM3D
-    src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
-    // Add offset for batched GEMM
-    src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    // Initialize accumulators
-    float4 acc0 = 0.0f;
-
-#if M0 > 1
-    float4 acc1 = 0.0f;
-#endif // M0 > 1
-
-#if M0 > 2
-    float4 acc2 = 0.0f;
-#endif // M0 > 2
-
-#if M0 > 3
-    float4 acc3 = 0.0f;
-#endif // M0 > 3
-
-    // A and B src indices get incremented at the same time.
-    int i = 0;
-    for(; i <= ((int)K - 4); i += 4)
-    {
-#if defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A and matrix B
-        LOAD_BLOCK(M0, 4, float, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s);
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A and matrix B
-        float4 a0 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
-        float4 a1 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
-        float4 a2 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
-        float4 a3 = vload4(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-        float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0.s0, b0.s0, acc0.s0);
-        acc0.s1 = fma(a0.s0, b0.s1, acc0.s1);
-        acc0.s2 = fma(a0.s0, b0.s2, acc0.s2);
-        acc0.s3 = fma(a0.s0, b0.s3, acc0.s3);
-
-#if M0 > 1
-
-        acc1.s0 = fma(a1.s0, b0.s0, acc1.s0);
-        acc1.s1 = fma(a1.s0, b0.s1, acc1.s1);
-        acc1.s2 = fma(a1.s0, b0.s2, acc1.s2);
-        acc1.s3 = fma(a1.s0, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
-        acc2.s0 = fma(a2.s0, b0.s0, acc2.s0);
-        acc2.s1 = fma(a2.s0, b0.s1, acc2.s1);
-        acc2.s2 = fma(a2.s0, b0.s2, acc2.s2);
-        acc2.s3 = fma(a2.s0, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
-        acc3.s0 = fma(a3.s0, b0.s0, acc3.s0);
-        acc3.s1 = fma(a3.s0, b0.s1, acc3.s1);
-        acc3.s2 = fma(a3.s0, b0.s2, acc3.s2);
-        acc3.s3 = fma(a3.s0, b0.s3, acc3.s3);
-#endif // M0 > 3
-
-        // Load values from matrix A and matrix B
-        b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0.s1, b0.s0, acc0.s0);
-        acc0.s1 = fma(a0.s1, b0.s1, acc0.s1);
-        acc0.s2 = fma(a0.s1, b0.s2, acc0.s2);
-        acc0.s3 = fma(a0.s1, b0.s3, acc0.s3);
-
-#if M0 > 1
-
-        acc1.s0 = fma(a1.s1, b0.s0, acc1.s0);
-        acc1.s1 = fma(a1.s1, b0.s1, acc1.s1);
-        acc1.s2 = fma(a1.s1, b0.s2, acc1.s2);
-        acc1.s3 = fma(a1.s1, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
-        acc2.s0 = fma(a2.s1, b0.s0, acc2.s0);
-        acc2.s1 = fma(a2.s1, b0.s1, acc2.s1);
-        acc2.s2 = fma(a2.s1, b0.s2, acc2.s2);
-        acc2.s3 = fma(a2.s1, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
-        acc3.s0 = fma(a3.s1, b0.s0, acc3.s0);
-        acc3.s1 = fma(a3.s1, b0.s1, acc3.s1);
-        acc3.s2 = fma(a3.s1, b0.s2, acc3.s2);
-        acc3.s3 = fma(a3.s1, b0.s3, acc3.s3);
-#endif // M0 > 3
-
-        // Load values from matrix A and matrix B
-        b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0.s2, b0.s0, acc0.s0);
-        acc0.s1 = fma(a0.s2, b0.s1, acc0.s1);
-        acc0.s2 = fma(a0.s2, b0.s2, acc0.s2);
-        acc0.s3 = fma(a0.s2, b0.s3, acc0.s3);
-
-#if M0 > 1
-
-        acc1.s0 = fma(a1.s2, b0.s0, acc1.s0);
-        acc1.s1 = fma(a1.s2, b0.s1, acc1.s1);
-        acc1.s2 = fma(a1.s2, b0.s2, acc1.s2);
-        acc1.s3 = fma(a1.s2, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
-        acc2.s0 = fma(a2.s2, b0.s0, acc2.s0);
-        acc2.s1 = fma(a2.s2, b0.s1, acc2.s1);
-        acc2.s2 = fma(a2.s2, b0.s2, acc2.s2);
-        acc2.s3 = fma(a2.s2, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
-        acc3.s0 = fma(a3.s2, b0.s0, acc3.s0);
-        acc3.s1 = fma(a3.s2, b0.s1, acc3.s1);
-        acc3.s2 = fma(a3.s2, b0.s2, acc3.s2);
-        acc3.s3 = fma(a3.s2, b0.s3, acc3.s3);
-#endif // M0 > 3
-
-        // Load values from matrix A and matrix B
-        b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0.s3, b0.s0, acc0.s0);
-        acc0.s1 = fma(a0.s3, b0.s1, acc0.s1);
-        acc0.s2 = fma(a0.s3, b0.s2, acc0.s2);
-        acc0.s3 = fma(a0.s3, b0.s3, acc0.s3);
-
-#if M0 > 1
-
-        acc1.s0 = fma(a1.s3, b0.s0, acc1.s0);
-        acc1.s1 = fma(a1.s3, b0.s1, acc1.s1);
-        acc1.s2 = fma(a1.s3, b0.s2, acc1.s2);
-        acc1.s3 = fma(a1.s3, b0.s3, acc1.s3);
-
-#endif // M0 > 1
-#if M0 > 2
-
-        acc2.s0 = fma(a2.s3, b0.s0, acc2.s0);
-        acc2.s1 = fma(a2.s3, b0.s1, acc2.s1);
-        acc2.s2 = fma(a2.s3, b0.s2, acc2.s2);
-        acc2.s3 = fma(a2.s3, b0.s3, acc2.s3);
-
-#endif // M0 > 2
-#if M0 > 3
-
-        acc3.s0 = fma(a3.s3, b0.s0, acc3.s0);
-        acc3.s1 = fma(a3.s3, b0.s1, acc3.s1);
-        acc3.s2 = fma(a3.s3, b0.s2, acc3.s2);
-        acc3.s3 = fma(a3.s3, b0.s3, acc3.s3);
-#endif // M0 > 3
-
-        src_addr.s0 += 4 * sizeof(float);
-    }
-
-    for(; i < (int)K; ++i)
-    {
-#if defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
-        float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
-        float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
-        float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
-        float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
-        float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
-        float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-        // Load values from matrix B
-        float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0, b0.s0, acc0.s0);
-        acc0.s1 = fma(a0, b0.s1, acc0.s1);
-        acc0.s2 = fma(a0, b0.s2, acc0.s2);
-        acc0.s3 = fma(a0, b0.s3, acc0.s3);
-#if M0 > 1
-        acc1.s0 = fma(a1, b0.s0, acc1.s0);
-        acc1.s1 = fma(a1, b0.s1, acc1.s1);
-        acc1.s2 = fma(a1, b0.s2, acc1.s2);
-        acc1.s3 = fma(a1, b0.s3, acc1.s3);
-#endif // M0 > 1
-#if M0 > 2
-        acc2.s0 = fma(a2, b0.s0, acc2.s0);
-        acc2.s1 = fma(a2, b0.s1, acc2.s1);
-        acc2.s2 = fma(a2, b0.s2, acc2.s2);
-        acc2.s3 = fma(a2, b0.s3, acc2.s3);
-#endif // M0 > 2
-#if M0 > 3
-        acc3.s0 = fma(a3, b0.s0, acc3.s0);
-        acc3.s1 = fma(a3, b0.s1, acc3.s1);
-        acc3.s2 = fma(a3, b0.s2, acc3.s2);
-        acc3.s3 = fma(a3, b0.s3, acc3.s3);
-#endif // M0 > 3
-
-        src_addr.s0 += sizeof(float);
-    }
-
-    int z = get_global_id(2);
-
-    // Compute dst address
-    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (dst_cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(M0, float, acc, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float));
-
-    LOAD_BLOCK(1, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)4 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src2_stride_y) + z * src2_stride_z;
-
-    LOAD_BLOCK(M0, 4, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(M0, float, bias, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias
-    ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, float, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store the output block
-    const bool cond_y = get_global_id(1) == 0;
-    const bool cond_x = ((get_global_id(0) + 1) * 4 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(M0, 4, float, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
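Editorial note: the unrolled body above is easier to read as a rank-1 update. Each k step broadcasts one A element per accumulator row against a float4 of B, with one fma per output lane. A small illustrative helper (not present in the deleted file) capturing one such step:

// One k-step for one accumulator row: acc += (float4)a_k * b_k, spelled out as
// individual fmas so each multiply-add maps onto a single FMA, as in the kernel above.
inline float4 rank1_update_row(float a_k, float4 b_k, float4 acc)
{
    acc.s0 = fma(a_k, b_k.s0, acc.s0);
    acc.s1 = fma(a_k, b_k.s1, acc.s1);
    acc.s2 = fma(a_k, b_k.s2, acc.s2);
    acc.s3 = fma(a_k, b_k.s3, acc.s3);
    return acc;
}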
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 32-bit floating point data type (float) and uses the fma units.
- *       This OpenCL kernel is optimized for Bifrost when the number of matrix B columns is less than or equal to 1000.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=2.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the input or output has to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in]  src0_ptr                           Pointer to the source matrix. Supported data types: F32
- * @param[in]  src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src0_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src0_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in]  src1_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in]  src1_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in]  src2_ptr                           (Optional) Pointer to the bias matrix. Supported data type: same as @p src0_ptr
- * @param[in]  src2_stride_x                      (Optional) Stride of the bias matrix in X dimension (in bytes)
- * @param[in]  src2_step_x                        (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  src2_stride_y                      (Optional) Stride of the bias matrix in Y dimension (in bytes)
- * @param[in]  src2_step_y                        (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
- * @param[out] dst_ptr                            Pointer to the destination matrix. Supported data types: same as @p src0_ptr
- * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
- * @param[in]  dst_step_x                         dst_gx_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
- * @param[in]  dst_step_y                         dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in]  dst_offset_first_element_in_bytes  The offset of the first element in the destination matrix
- * @param[in]  src0_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
- * @param[in]  src2_stride_z                      (Optional) Stride of the bias matrix in Z dimension (in bytes)
- * @param[in]  dst_stride_z                       Stride of the destination tensor in Z dimension (in bytes)
- * @param[in]  src_cross_plane_pad                (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D)
- * @param[in]  dst_cross_plane_pad                (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
- */
-__kernel void gemm_mm_floating_point_f32_bifrost_1000(IMAGE_DECLARATION(src0),
-                                                      IMAGE_DECLARATION(src1),
-#if defined(BETA)
-                                                      IMAGE_DECLARATION(src2),
-#endif // defined(BETA)
-                                                      IMAGE_DECLARATION(dst),
-                                                      uint src0_stride_z,
-                                                      uint src1_stride_z,
-#if defined(BETA)
-                                                      uint src2_stride_z,
-#endif //defined(BETA)
-                                                      uint dst_stride_z
-#if defined(REINTERPRET_INPUT_AS_3D)
-                                                      ,
-                                                      uint src_cross_plane_pad
-#endif // REINTERPRET_INPUT_AS_3D
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-                                                      ,
-                                                      uint dst_cross_plane_pad
-#endif // REINTERPRET_OUTPUT_AS_3D
-                                                     )
-{
-    // Requires 2 N0, C vect2, A vect4, B (2 vload2) // to fix for M0 > 1
-    int idx = get_global_id(0) * N0;
-
-    // Compute starting address for matrix A and Matrix B
-    int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes));
-
-    // Update address for the matrix A
-    src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y;
-
-    // Update address for the matrix B
-    src_addr.s1 += idx * sizeof(float);
-
-#if defined(REINTERPRET_INPUT_AS_3D)
-    // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D
-    uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
-    zin = min(DEPTH_GEMM3D - 1, zin);
-
-    // Add offset due to the cross plane paddings
-    zin *= (src_cross_plane_pad * src0_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply src0_stride_z by DEPTH_GEMM3D
-    src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D;
-
-#else // defined(REINTERPRET_INPUT_AS_3D)
-
-    // Add offset for batched GEMM
-    src_addr.s0 += get_global_id(2) * src0_stride_z;
-
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-#if defined(MATRIX_B_DEPTH)
-    // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
-    src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z;
-#else // defined(MATRIX_B_DEPTH)
-    src_addr.s1 += get_global_id(2) * src1_stride_z;
-#endif // defined(MATRIX_B_DEPTH)
-
-    // Initialize accumulators
-    float2 acc0 = 0.0f;
-#if M0 > 1
-    float2 acc1 = 0.0f;
-#endif // M0 > 1
-#if M0 > 2
-    float2 acc2 = 0.0f;
-#endif // M0 > 2
-#if M0 > 3
-    float2 acc3 = 0.0f;
-#endif // M0 > 3
-
-    // A and B src indices get incremented at the same time.
-    int i = 0;
-    for(; i <= ((int)K - 8); i += 8)
-    {
-#if defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        float8 a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + zin.s0));
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        float8 a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-        // Load values from matrix B
-        float2 b0 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b1 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b2 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b3 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b4 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b5 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b6 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-        float2 b7 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0.s0, b0.s0, acc0.s0);
-        acc0.s0 = fma(a0.s1, b1.s0, acc0.s0);
-        acc0.s0 = fma(a0.s2, b2.s0, acc0.s0);
-        acc0.s0 = fma(a0.s3, b3.s0, acc0.s0);
-        acc0.s0 = fma(a0.s4, b4.s0, acc0.s0);
-        acc0.s0 = fma(a0.s5, b5.s0, acc0.s0);
-        acc0.s0 = fma(a0.s6, b6.s0, acc0.s0);
-        acc0.s0 = fma(a0.s7, b7.s0, acc0.s0);
-
-        acc0.s1 = fma(a0.s0, b0.s1, acc0.s1);
-        acc0.s1 = fma(a0.s1, b1.s1, acc0.s1);
-        acc0.s1 = fma(a0.s2, b2.s1, acc0.s1);
-        acc0.s1 = fma(a0.s3, b3.s1, acc0.s1);
-        acc0.s1 = fma(a0.s4, b4.s1, acc0.s1);
-        acc0.s1 = fma(a0.s5, b5.s1, acc0.s1);
-        acc0.s1 = fma(a0.s6, b6.s1, acc0.s1);
-        acc0.s1 = fma(a0.s7, b7.s1, acc0.s1);
-
-#if M0 > 1
-#if defined(REINTERPRET_INPUT_AS_3D)
-        a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-        acc1.s0 = fma(a0.s0, b0.s0, acc1.s0);
-        acc1.s0 = fma(a0.s1, b1.s0, acc1.s0);
-        acc1.s0 = fma(a0.s2, b2.s0, acc1.s0);
-        acc1.s0 = fma(a0.s3, b3.s0, acc1.s0);
-        acc1.s0 = fma(a0.s4, b4.s0, acc1.s0);
-        acc1.s0 = fma(a0.s5, b5.s0, acc1.s0);
-        acc1.s0 = fma(a0.s6, b6.s0, acc1.s0);
-        acc1.s0 = fma(a0.s7, b7.s0, acc1.s0);
-
-        acc1.s1 = fma(a0.s0, b0.s1, acc1.s1);
-        acc1.s1 = fma(a0.s1, b1.s1, acc1.s1);
-        acc1.s1 = fma(a0.s2, b2.s1, acc1.s1);
-        acc1.s1 = fma(a0.s3, b3.s1, acc1.s1);
-        acc1.s1 = fma(a0.s4, b4.s1, acc1.s1);
-        acc1.s1 = fma(a0.s5, b5.s1, acc1.s1);
-        acc1.s1 = fma(a0.s6, b6.s1, acc1.s1);
-        acc1.s1 = fma(a0.s7, b7.s1, acc1.s1);
-#endif // M0 > 1
-#if M0 > 2
-#if defined(REINTERPRET_INPUT_AS_3D)
-        a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-        acc2.s0 = fma(a0.s0, b0.s0, acc2.s0);
-        acc2.s0 = fma(a0.s1, b1.s0, acc2.s0);
-        acc2.s0 = fma(a0.s2, b2.s0, acc2.s0);
-        acc2.s0 = fma(a0.s3, b3.s0, acc2.s0);
-        acc2.s0 = fma(a0.s4, b4.s0, acc2.s0);
-        acc2.s0 = fma(a0.s5, b5.s0, acc2.s0);
-        acc2.s0 = fma(a0.s6, b6.s0, acc2.s0);
-        acc2.s0 = fma(a0.s7, b7.s0, acc2.s0);
-
-        acc2.s1 = fma(a0.s0, b0.s1, acc2.s1);
-        acc2.s1 = fma(a0.s1, b1.s1, acc2.s1);
-        acc2.s1 = fma(a0.s2, b2.s1, acc2.s1);
-        acc2.s1 = fma(a0.s3, b3.s1, acc2.s1);
-        acc2.s1 = fma(a0.s4, b4.s1, acc2.s1);
-        acc2.s1 = fma(a0.s5, b5.s1, acc2.s1);
-        acc2.s1 = fma(a0.s6, b6.s1, acc2.s1);
-        acc2.s1 = fma(a0.s7, b7.s1, acc2.s1);
-#endif // M0 > 2
-#if M0 > 3
-#if defined(REINTERPRET_INPUT_AS_3D)
-        a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        a0 = vload8(0, (__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-        acc3.s0 = fma(a0.s0, b0.s0, acc3.s0);
-        acc3.s0 = fma(a0.s1, b1.s0, acc3.s0);
-        acc3.s0 = fma(a0.s2, b2.s0, acc3.s0);
-        acc3.s0 = fma(a0.s3, b3.s0, acc3.s0);
-        acc3.s0 = fma(a0.s4, b4.s0, acc3.s0);
-        acc3.s0 = fma(a0.s5, b5.s0, acc3.s0);
-        acc3.s0 = fma(a0.s6, b6.s0, acc3.s0);
-        acc3.s0 = fma(a0.s7, b7.s0, acc3.s0);
-
-        acc3.s1 = fma(a0.s0, b0.s1, acc3.s1);
-        acc3.s1 = fma(a0.s1, b1.s1, acc3.s1);
-        acc3.s1 = fma(a0.s2, b2.s1, acc3.s1);
-        acc3.s1 = fma(a0.s3, b3.s1, acc3.s1);
-        acc3.s1 = fma(a0.s4, b4.s1, acc3.s1);
-        acc3.s1 = fma(a0.s5, b5.s1, acc3.s1);
-        acc3.s1 = fma(a0.s6, b6.s1, acc3.s1);
-        acc3.s1 = fma(a0.s7, b7.s1, acc3.s1);
-#endif // M0 > 3
-
-        src_addr.s0 += sizeof(float) * 8;
-    }
-    // float size increment
-    for(; i < (int)K; ++i)
-    {
-#if defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0));
-#if M0 > 1
-        float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1));
-#endif // M0 > 1
-#if M0 > 2
-        float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2));
-#endif // M0 > 2
-#if M0 > 3
-        float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3));
-#endif // M0 > 3
-#else // defined(REINTERPRET_INPUT_AS_3D)
-        // Load values from matrix A
-        float a0 = *((__global float *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y));
-#if M0 > 1
-        float a1 = *((__global float *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y));
-#endif // M0 > 1
-#if M0 > 2
-        float a2 = *((__global float *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y));
-#endif // M0 > 2
-#if M0 > 3
-        float a3 = *((__global float *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y));
-#endif // M0 > 3
-#endif // defined(REINTERPRET_INPUT_AS_3D)
-
-        // Load values from matrix B
-        float2 b0 = vload2(0, (__global float *)(src1_ptr + src_addr.s1));
-        src_addr.s1 += src1_stride_y;
-
-        // Multiply and accumulate
-        acc0.s0 = fma(a0, b0.s0, acc0.s0);
-        acc0.s1 = fma(a0, b0.s1, acc0.s1);
-#if M0 > 1
-        acc1.s0 = fma(a1, b0.s0, acc1.s0);
-        acc1.s1 = fma(a1, b0.s1, acc1.s1);
-#endif // M0 > 1
-#if M0 > 2
-        acc2.s0 = fma(a2, b0.s0, acc2.s0);
-        acc2.s1 = fma(a2, b0.s1, acc2.s1);
-#endif // M0 > 2
-#if M0 > 3
-        acc3.s0 = fma(a3, b0.s0, acc3.s0);
-        acc3.s1 = fma(a3, b0.s1, acc3.s1);
-#endif // M0 > 3
-
-        src_addr.s0 += sizeof(float);
-    }
-
-    int z = get_global_id(2);
-
-    // Compute dst address
-    __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)2 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y);
-
-    uint4 zout = 0;
-
-#if defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension
-    // in order to take into account the presence of possible cross plane paddings
-    //
-    //  |                  |
-    //  |      plane0      |
-    //  |                  |
-    //  |__________________|
-    //  |******************|
-    //  |  cross_plane_pad |
-    //  |******************|
-    //  |                  |
-    //  |      plane1      |
-    //  |                  |
-    //  |__________________|
-
-    // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D
-    zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D;
-    zout = min(DEPTH_GEMM3D - 1, zout);
-
-    // Add offset due to the cross plane paddings
-    zout *= (dst_cross_plane_pad * dst_stride_y);
-
-    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(M0, float, acc, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)2 * sizeof(float));
-
-    LOAD_BLOCK(1, 2, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, float, bias, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)2 * sizeof(float)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src2_stride_y) + z * src2_stride_z;
-
-    LOAD_BLOCK(M0, 2, float, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(M0, float, bias, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias
-    ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, float, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store the output block
-    const bool cond_y = get_global_id(1) == 0;
-    const bool cond_x = ((get_global_id(0) + 1) * 2 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(M0, 2, float, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-
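Editorial note: all of these kernels repeat the same REINTERPRET_*_AS_3D bookkeeping, dividing the tile's row indices by HEIGHT_GEMM3D to find each row's Z plane, clamping to DEPTH_GEMM3D - 1, and converting the crossed planes into a per-row byte offset. A hypothetical helper condensing that logic (the kernels inline it instead of calling a function):

// Returns, for the 4 rows starting at first_row, the extra byte offset caused by
// cross_plane_pad padding rows inserted between consecutive Z planes.
inline uint4 cross_plane_offsets(uint first_row, uint height_gemm3d, uint depth_gemm3d,
                                 uint cross_plane_pad, uint stride_y)
{
    uint4 z = ((uint4)(0, 1, 2, 3) + (uint4)first_row) / (uint4)height_gemm3d;
    z = min(z, (uint4)(depth_gemm3d - 1)); // rows past the last plane stay in it
    return z * cross_plane_pad * stride_y; // bytes to add per row
}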
-
-#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 16-bit floating point data type (half) and accumulates the result in 32-bit floating point variables.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=8.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr                           Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr                           (Optional) Pointer to the bias matrix.
Supported data type: same as @p lhs_ptr - * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes) - * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes) - * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr - * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) - * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) - * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix - * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes) - * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes) - * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D) - * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) - */ -__kernel void gemm_mm_floating_point_f16_bifrost_acc32(IMAGE_DECLARATION(src0), - IMAGE_DECLARATION(src1), -#if defined(BETA) - IMAGE_DECLARATION(src2), -#endif // defined(BETA) - IMAGE_DECLARATION(dst), - uint src0_stride_z, - uint src1_stride_z, -#if defined(BETA) - uint src2_stride_z, -#endif //defined(BETA) - uint dst_stride_z -#if defined(REINTERPRET_INPUT_AS_3D) - , - uint src_cross_plane_pad -#endif // REINTERPRET_INPUT_AS_3D -#if defined(REINTERPRET_OUTPUT_AS_3D) - , - uint dst_cross_plane_pad -#endif // REINTERPRET_OUTPUT_AS_3D - ) -{ - int idx = get_global_id(0) * N0; - - // Compute starting address for matrix A and Matrix B - int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes)); - - // Update address for the matrix A - src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y; - - // Update address for the matrix B - src_addr.s1 += idx * sizeof(half); - -#if defined(REINTERPRET_INPUT_AS_3D) - // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension - // in order to take into account the presence of possible cross plane paddings - // - // | | - // | plane0 | - // | | - // |__________________| - // |******************| - // | cross_plane_pad | - // |******************| - // | | - // | plane1 | - // | | - // |__________________| - - // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D - uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D; - zin = min(DEPTH_GEMM3D - 1, zin); - - // Add offset due to the cross plane paddings - zin *= (src_cross_plane_pad * src0_stride_y); - - // Add offset for batched GEMM. 
The batches will be in the fourth dimension and for this reason we - // multiply src0_stride_z by DEPTH_GEMM3D - src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D; - -#else // defined(REINTERPRET_INPUT_AS_3D) - - // Add offset for batched GEMM - src_addr.s0 += get_global_id(2) * src0_stride_z; - -#endif // defined(REINTERPRET_INPUT_AS_3D) - -#if defined(MATRIX_B_DEPTH) - // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3 - src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z; -#else // defined(MATRIX_B_DEPTH) - src_addr.s1 += get_global_id(2) * src1_stride_z; -#endif // defined(MATRIX_B_DEPTH) - - float8 acc0 = 0.0h; -#if M0 > 1 - float8 acc1 = 0.0h; -#endif // M0 > 1 -#if M0 > 2 - float8 acc2 = 0.0h; -#endif // M0 > 2 -#if M0 > 3 - float8 acc3 = 0.0h; -#endif // M0 > 3 - - int i = 0; - for(; i <= ((int)K - 4); i += 4) - { -#if defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - LOAD_BLOCK(M0, 4, half, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s); -#else // defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - half4 a0 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y)); -#if M0 > 1 - half4 a1 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y)); -#endif // M0 > 1 -#if M0 > 2 - half4 a2 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y)); -#endif // M0 > 2 -#if M0 > 3 - half4 a3 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y)); -#endif // M0 > 3 -#endif // defined(REINTERPRET_INPUT_AS_3D) - - // Load values from matrix B - float8 b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1))); - src_addr.s1 += src1_stride_y; - - // Accumulate - acc0 = fma(b0, (float8)a0.s0, acc0); -#if M0 > 1 - acc1 = fma(b0, (float8)a1.s0, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (float8)a2.s0, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (float8)a3.s0, acc3); -#endif // M0 > 3 - - b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1))); - src_addr.s1 += src1_stride_y; - acc0 = fma(b0, (float8)a0.s1, acc0); -#if M0 > 1 - acc1 = fma(b0, (float8)a1.s1, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (float8)a2.s1, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (float8)a3.s1, acc3); -#endif // M0 > 3 - - b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1))); - src_addr.s1 += src1_stride_y; - acc0 = fma(b0, (float8)a0.s2, acc0); -#if M0 > 1 - acc1 = fma(b0, (float8)a1.s2, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (float8)a2.s2, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (float8)a3.s2, acc3); -#endif // M0 > 3 - - b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1))); - src_addr.s1 += src1_stride_y; - acc0 = fma(b0, (float8)a0.s3, acc0); -#if M0 > 1 - acc1 = fma(b0, (float8)a1.s3, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (float8)a2.s3, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (float8)a3.s3, acc3); -#endif // M0 > 3 - - src_addr.s0 += 4 * sizeof(half); - } - - for(; i < (int)K; ++i) - { -#if defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0)); -#if M0 > 1 - half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1)); -#endif // M0 > 1 -#if M0 > 2 - half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2)); -#endif // M0 > 2 -#if M0 > 3 - 
half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3)); -#endif // M0 > 3 -#else // defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y)); -#if M0 > 1 - half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y)); -#endif // M0 > 1 -#if M0 > 2 - half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y)); -#endif // M0 > 2 -#if M0 > 3 - half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y)); -#endif // M0 > 3 -#endif // defined(REINTERPRET_INPUT_AS_3D) - - // Load values from matrix B - float8 b0 = convert_float8(vload8(0, (__global half *)(src1_ptr + src_addr.s1))); - - src_addr += (int2)(sizeof(half), src1_stride_y); - - // Accumulate - acc0 = fma(b0, (float8)a0, acc0); // b0 * (half8)a0; -#if M0 > 1 - acc1 = fma(b0, (float8)a1, acc1); // b0 * (half8)a1; -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (float8)a2, acc2); // b0 * (half8)a2; -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (float8)a3, acc3); // b0 * (half8)a3; -#endif // M0 > 3 - } - - int z = get_global_id(2); - - // Compute dst address - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y); - - uint4 zout = 0; - -#if defined(REINTERPRET_OUTPUT_AS_3D) - - // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension - // in order to take into account the presence of possible cross plane paddings - // - // | | - // | plane0 | - // | | - // |__________________| - // |******************| - // | cross_plane_pad | - // |******************| - // | | - // | plane1 | - // | | - // |__________________| - - // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D - zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D; - zout = min(DEPTH_GEMM3D - 1, zout); - - // Add offset due to the cross plane paddings - zout *= (dst_cross_plane_pad * dst_stride_y); - - // Add offset for batched GEMM. 
The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(M0, float, acc, ALPHA);
-#endif // defined(ALPHA)
-
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
-    LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-    float8 bias_f0 = convert_float8(bias0);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, float, bias_f, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(M0, acc, bias_f0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src2_stride_y) + z * src2_stride_z;
-
-    LOAD_BLOCK(M0, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-    float8 bias_f0 = convert_float8(bias0);
-#if M0 > 1
-    float8 bias_f1 = convert_float8(bias1);
-#endif // M0 > 1
-#if M0 > 2
-    float8 bias_f2 = convert_float8(bias2);
-#endif // M0 > 2
-#if M0 > 3
-    float8 bias_f3 = convert_float8(bias3);
-#endif // M0 > 3
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(M0, float, bias_f, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias
-    ADD_BLOCK(M0, acc, bias_f);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-    half8 acc_h0 = convert_half8(acc0);
-#if M0 > 1
-    half8 acc_h1 = convert_half8(acc1);
-#endif // M0 > 1
-#if M0 > 2
-    half8 acc_h2 = convert_half8(acc2);
-#endif // M0 > 2
-#if M0 > 3
-    half8 acc_h3 = convert_half8(acc3);
-#endif // M0 > 3
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, half, VEC_SIZE, acc_h, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store the output block
-    const bool cond_y = get_global_id(1) == 0;
-    const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(M0, 8, half, acc_h, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
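The kernel that just ended embodies the mixed-precision trade-off named in its _acc32 suffix: operands stay in half, every fma accumulates into float, and the result is narrowed back to half exactly once, before the store. A minimal scalar sketch of the same pattern, assuming a C++23 toolchain that provides std::float16_t in <stdfloat> (the function name is ours):

#include <cmath>
#include <cstddef>
#include <stdfloat>

std::float16_t dot_fp16_acc32(const std::float16_t *a, const std::float16_t *b, std::size_t k)
{
    float acc = 0.0f; // 32-bit accumulator: one rounding at the end instead of one per step
    for(std::size_t i = 0; i < k; ++i)
    {
        acc = std::fma(static_cast<float>(a[i]), static_cast<float>(b[i]), acc);
    }
    return static_cast<std::float16_t>(acc);
}

The plain (non-acc32) variant that follows keeps the accumulators in half as well, trading accuracy for register pressure and throughput.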
-
-/** This OpenCL kernel computes the matrix by matrix multiplication between the matrix A (src0) and matrix B (src1) in case both matrices have not been reshaped
- *
- * @note This OpenCL kernel works with the 16-bit floating point data type (half) and uses the fma units.
- * @note The number of elements processed along the x and y directions must be passed at compile time using -DN0 and -DM0.
- * @note This kernel processes a fixed number of elements along x: -DN0=8.
- * @note The number of columns of matrix A and the number of columns of the matrix B need to be passed at compile time using -DK and -DN
- * @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
- * @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
- * @note The optional alpha value needs to be passed at compile time using -DALPHA
- * @note In case the matrix B has 3 dimensions and the matrix A more than 3, in order to avoid out-of-bounds reads, the number of channels of matrix B must be passed at compile time using MATRIX_B_DEPTH (e.g. -DMATRIX_B_DEPTH=16)
- *       This case can happen when GEMM is used to perform the element-wise multiplication through a batched matrix multiplication (2D Winograd) and we have multiple inputs (e.g. a = [K, M, 16, Batches], b = [N, K, 16])
- *
- * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
- *       The activation function is performed after the bias addition
- * @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
- *       -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
- *       -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
- *       -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
- *       -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
- *          (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
- *
- * @param[in] src0_ptr                           Pointer to the source matrix. Supported data types: F16
- * @param[in] src0_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in] src0_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src0_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
- * @param[in] src0_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
- * @param[in] src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
- * @param[in] src1_step_x                        src_stride_x * number of elements along X processed per workitem (in bytes)
- * @param[in] src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
- * @param[in] src1_step_y                        src_stride_y * number of elements along Y processed per workitem (in bytes)
- * @param[in] src1_offset_first_element_in_bytes The offset of the first element in the source matrix
- * @param[in] src2_ptr                           (Optional) Pointer to the bias matrix.
Supported data type: same as @p lhs_ptr - * @param[in] src2_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes) - * @param[in] src2_step_x (Optional) src2_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] src2_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes) - * @param[in] src2_step_y (Optional) src2_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] src2_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix - * @param[out] dst_ptr Pointer to the destination matrix Supported data types: same as @p src0_ptr - * @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes) - * @param[in] dst_step_x dst_gx_stride_x * number of elements along X processed per workitem(in bytes) - * @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes) - * @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes) - * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix - * @param[in] src0_stride_z Stride of the source matrix in Z dimension (in bytes) - * @param[in] src1_stride_z Stride of the source matrix in Z dimension (in bytes) - * @param[in] src2_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes) - * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) - * @param[in] src_cross_plane_pad (Optional) Bottom paddings in unit of elements for the input tensor (only if defined REINTERPRET_INPUT_AS_3D) - * @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D) - */ -__kernel void gemm_mm_floating_point_f16_bifrost(IMAGE_DECLARATION(src0), - IMAGE_DECLARATION(src1), -#if defined(BETA) - IMAGE_DECLARATION(src2), -#endif // defined(BETA) - IMAGE_DECLARATION(dst), - uint src0_stride_z, - uint src1_stride_z, -#if defined(BETA) - uint src2_stride_z, -#endif //defined(BETA) - uint dst_stride_z -#if defined(REINTERPRET_INPUT_AS_3D) - , - uint src_cross_plane_pad -#endif // REINTERPRET_INPUT_AS_3D -#if defined(REINTERPRET_OUTPUT_AS_3D) - , - uint dst_cross_plane_pad -#endif // REINTERPRET_OUTPUT_AS_3D - ) -{ - int idx = get_global_id(0) * N0; - - // Compute starting address for matrix A and Matrix B - int2 src_addr = ((int2)(src0_offset_first_element_in_bytes, src1_offset_first_element_in_bytes)); - - // Update address for the matrix A - src_addr.s0 += COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src0_stride_y; - - // Update address for the matrix B - src_addr.s1 += idx * sizeof(half); - -#if defined(REINTERPRET_INPUT_AS_3D) - // Since we load a 2D input tile from a 3D tensor, we need to check when the plane changes across the z dimension - // in order to take into account the presence of possible cross plane paddings - // - // | | - // | plane0 | - // | | - // |__________________| - // |******************| - // | cross_plane_pad | - // |******************| - // | | - // | plane1 | - // | | - // |__________________| - - // The plane (zin) is calculated dividing row by HEIGHT_GEMM3D - uint4 zin = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D; - zin = min(DEPTH_GEMM3D - 1, zin); - - // Add offset due to the cross plane paddings - zin *= (src_cross_plane_pad * src0_stride_y); - - // Add offset for batched GEMM. 
The batches will be in the fourth dimension and for this reason we - // multiply src0_stride_z by DEPTH_GEMM3D - src_addr.s0 += get_global_id(2) * src0_stride_z * DEPTH_GEMM3D; - -#else // defined(REINTERPRET_INPUT_AS_3D) - - // Add offset for batched GEMM - src_addr.s0 += get_global_id(2) * src0_stride_z; - -#endif // defined(REINTERPRET_INPUT_AS_3D) - -#if defined(MATRIX_B_DEPTH) - // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3 - src_addr.s1 += (get_global_id(2) % MATRIX_B_DEPTH) * src1_stride_z; -#else // defined(MATRIX_B_DEPTH) - src_addr.s1 += get_global_id(2) * src1_stride_z; -#endif // defined(MATRIX_B_DEPTH) - - half8 acc0 = 0.0h; -#if M0 > 1 - half8 acc1 = 0.0h; -#endif // M0 > 1 -#if M0 > 2 - half8 acc2 = 0.0h; -#endif // M0 > 2 -#if M0 > 3 - half8 acc3 = 0.0h; -#endif // M0 > 3 - - int i = 0; - for(; i <= ((int)K - 4); i += 4) - { -#if defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - LOAD_BLOCK(M0, 4, half, a, src0_ptr, src_addr.s0, src0_stride_y, zin.s); -#else // defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - half4 a0 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y)); -#if M0 > 1 - half4 a1 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y)); -#endif // M0 > 1 -#if M0 > 2 - half4 a2 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y)); -#endif // M0 > 2 -#if M0 > 3 - half4 a3 = vload4(0, (__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y)); -#endif // M0 > 3 -#endif // defined(REINTERPRET_INPUT_AS_3D) - - // Load values from matrix B - half8 b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1)); - src_addr.s1 += src1_stride_y; - - // Accumulate - acc0 = fma(b0, (half8)a0.s0, acc0); -#if M0 > 1 - acc1 = fma(b0, (half8)a1.s0, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (half8)a2.s0, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (half8)a3.s0, acc3); -#endif // M0 > 3 - - b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1)); - src_addr.s1 += src1_stride_y; - acc0 = fma(b0, (half8)a0.s1, acc0); -#if M0 > 1 - acc1 = fma(b0, (half8)a1.s1, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (half8)a2.s1, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (half8)a3.s1, acc3); -#endif // M0 > 3 - - b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1)); - src_addr.s1 += src1_stride_y; - acc0 = fma(b0, (half8)a0.s2, acc0); -#if M0 > 1 - acc1 = fma(b0, (half8)a1.s2, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (half8)a2.s2, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (half8)a3.s2, acc3); -#endif // M0 > 3 - - b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1)); - src_addr.s1 += src1_stride_y; - acc0 = fma(b0, (half8)a0.s3, acc0); -#if M0 > 1 - acc1 = fma(b0, (half8)a1.s3, acc1); -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (half8)a2.s3, acc2); -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (half8)a3.s3, acc3); -#endif // M0 > 3 - - src_addr.s0 += 4 * sizeof(half); - } - - for(; i < (int)K; ++i) - { -#if defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y + zin.s0)); -#if M0 > 1 - half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y + zin.s1)); -#endif // M0 > 1 -#if M0 > 2 - half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y + zin.s2)); -#endif // M0 > 2 -#if M0 > 3 - half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y + zin.s3)); 
-#endif // M0 > 3 -#else // defined(REINTERPRET_INPUT_AS_3D) - // Load values from matrix A - half a0 = *((__global half *)(src0_ptr + src_addr.s0 + 0 * src0_stride_y)); -#if M0 > 1 - half a1 = *((__global half *)(src0_ptr + src_addr.s0 + 1 * src0_stride_y)); -#endif // M0 > 1 -#if M0 > 2 - half a2 = *((__global half *)(src0_ptr + src_addr.s0 + 2 * src0_stride_y)); -#endif // M0 > 2 -#if M0 > 3 - half a3 = *((__global half *)(src0_ptr + src_addr.s0 + 3 * src0_stride_y)); -#endif // M0 > 3 -#endif // defined(REINTERPRET_INPUT_AS_3D) - - // Load values from matrix B - half8 b0 = vload8(0, (__global half *)(src1_ptr + src_addr.s1)); - - src_addr += (int2)(sizeof(half), src1_stride_y); - - // Accumulate - acc0 = fma(b0, (half8)a0, acc0); // b0 * (half8)a0; -#if M0 > 1 - acc1 = fma(b0, (half8)a1, acc1); // b0 * (half8)a1; -#endif // M0 > 1 -#if M0 > 2 - acc2 = fma(b0, (half8)a2, acc2); // b0 * (half8)a2; -#endif // M0 > 2 -#if M0 > 3 - acc3 = fma(b0, (half8)a3, acc3); // b0 * (half8)a3; -#endif // M0 > 3 - } - - int z = get_global_id(2); - - // Compute dst address - __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * dst_stride_y); - - uint4 zout = 0; - -#if defined(REINTERPRET_OUTPUT_AS_3D) - - // Since we store a 2D output tile in a 3D tensor, we need to check when the plane changes across the z dimension - // in order to take into account the presence of possible cross plane paddings - // - // | | - // | plane0 | - // | | - // |__________________| - // |******************| - // | cross_plane_pad | - // |******************| - // | | - // | plane1 | - // | | - // |__________________| - - // The plane (zout) is calculated dividing row by HEIGHT_GEMM3D - zout = ((uint4)(0, 1, 2, 3) + (uint4)(COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0))) / (uint4)HEIGHT_GEMM3D; - zout = min(DEPTH_GEMM3D - 1, zout); - - // Add offset due to the cross plane paddings - zout *= (dst_cross_plane_pad * dst_stride_y); - - // Add offset for batched GEMM. 
The batches will be in the fourth dimension and for this reason we
-    // multiply dst_stride_z by DEPTH_GEMM3D
-    dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
-#else // defined(REINTERPRET_OUTPUT_AS_3D)
-    // Add offset for batched GEMM
-    dst_addr += z * dst_stride_z;
-#endif // defined(REINTERPRET_OUTPUT_AS_3D)
-
-    // Multiply by the weight of matrix-matrix product and store the result
-#if defined(ALPHA)
-    SCALE_BLOCK(M0, half, acc, ALPHA);
-#endif // defined(ALPHA)
-
-    // Add beta*bias
-#if defined(BETA)
-    REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
-
-#if defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half));
-
-    LOAD_BLOCK(1, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(1, half, bias, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias[broadcasted]
-    ADD_BLOCK_BROADCAST(M0, acc, bias0);
-
-#else // defined(BROADCAST_BIAS)
-    __global uchar *src2_addr = src2_ptr + src2_offset_first_element_in_bytes + (get_global_id(0) * (uint)8 * sizeof(half)) + (COMPUTE_M0_START_ROW(get_global_id(1), M0, PARTIAL_STORE_M0) * src2_stride_y) + z * src2_stride_z;
-
-    LOAD_BLOCK(M0, 8, half, bias, src2_addr, 0, src2_stride_y, zero);
-
-#ifndef UNIT_BETA
-    SCALE_BLOCK(M0, half, bias, BETA);
-#endif // UNIT_BETA
-
-    // acc = acc + bias
-    ADD_BLOCK(M0, acc, bias);
-
-#endif // defined(BROADCAST_BIAS)
-#endif // defined(BETA)
-
-#if defined(ACTIVATION_TYPE)
-    ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, half, VEC_SIZE, acc, A_VAL, B_VAL);
-#endif // defined(ACTIVATION_TYPE)
-
-    // Store the output block
-    const bool cond_y = get_global_id(1) == 0;
-    const bool cond_x = ((get_global_id(0) + 1) * 8 >= N);
-    STORE_BLOCK_BOUNDARY_AWARE(M0, 8, half, acc, dst_addr, dst_stride_y, zout.s, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
-}
-#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
-
-#endif // defined(N) && defined(K) && defined(M0) && defined(N0) && defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
\ No newline at end of file
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index 4af42262b9..9d524f936e 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -271,16 +271,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
     { "gemm_ma_f32", "common/gemm.cl" },
     { "gemm_mv", "common/gemv.cl" },
     { "gemm_mv_quantized", "common/gemv.cl" },
-    { "gemm_mm_interleaved_transposed_f16", "common/gemm_v1.cl" },
-    { "gemm_mm_interleaved_transposed_f16_acc32", "common/gemm_v1.cl" },
-    { "gemm_mm_interleaved_transposed_f16_bifrost", "common/gemm_v1.cl" },
-    { "gemm_mm_interleaved_transposed_f32", "common/gemm_v1.cl" },
-    { "gemm_mm_interleaved_transposed_f32_bifrost", "common/gemm_v1.cl" },
-    { "gemm_mm_floating_point", "common/gemm_v1.cl" },
-    { "gemm_mm_floating_point_f16_bifrost", "common/gemm_v1.cl" },
-    { "gemm_mm_floating_point_f16_bifrost_acc32", "common/gemm_v1.cl" },
-    { "gemm_mm_floating_point_f32_bifrost", "common/gemm_v1.cl" },
-    { "gemm_mm_floating_point_f32_bifrost_1000", "common/gemm_v1.cl" },
     { "gemm_mm_native", "common/gemm.cl" },
     { "gemm_mm_reshaped_lhs_nt_rhs_t", "common/gemm.cl" },
     { "gemm_mm_reshaped_lhs_nt_rhs_t_texture", "common/gemm.cl" },
@@ -589,10 +579,6 @@ const std::map<std::string, std::string> ClKernelLibrary::_program_source_map =
     {
         "common/gemm.cl",
 #include "./cl_kernels/common/gemm.clembed"
-    },
-    {
-        "common/gemm_v1.cl",
-#include "./cl_kernels/common/gemm_v1.clembed"
     },
     {
         "common/gemmlowp.cl",
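For context on the two hunks above: ClKernelLibrary keeps one map from kernel name to the .cl file that defines it and one from .cl file to its embedded source, so retiring gemm_v1.cl means dropping every entry that referenced it from both. An abridged illustration of the registry shape being edited (entries taken from the hunk itself):

#include <map>
#include <string>

// Kernel name -> providing .cl file; gemm_v1.cl entries are gone.
const std::map<std::string, std::string> kernel_program_map = {
    { "gemm_mm_native", "common/gemm.cl" },                // kept
    { "gemm_mm_reshaped_lhs_nt_rhs_t", "common/gemm.cl" }, // kept
    // { "gemm_mm_floating_point", "common/gemm_v1.cl" },  // removed by this patch
};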
diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp
deleted file mode 100644
index 4e934f0f33..0000000000
--- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.cpp
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Copyright (c) 2017-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/utils/helpers/float_ops.h"
-#include "support/Cast.h"
-#include "support/StringSupport.h"
-
-namespace arm_compute
-{
-namespace opencl
-{
-namespace kernels
-{
-namespace
-{
-using ElementsProcessed = Steps;
-
-inline Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst);
-    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src0);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG((fp_mixed_precision && (src0->data_type() != DataType::F16)), "Mixed precision floating point is supported only for F16 data");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the matrix A must be <= 4");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the matrix B must be <= 3");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_interleaved_transposed && reshape_info.reinterpret_input_as_3d(), "The input tensor cannot be reinterpreted as 3D if is_interleaved_transposed is true");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 2 && reshape_info.reinterpret_input_as_3d(), "The src1 tensor cannot have more than 2 dimensions if src0 has to be reinterpreted as 3D");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG((reshape_info.reinterpret_input_as_3d() || reshape_info.depth_output_gemm3d() != 0) && (src2 != nullptr) && (!reshape_info.broadcast_bias()),
-                                    "Bias addition only supported with broadcast mode in case the input or dst has to be reinterpreted as 3D");
-
-    if(!is_interleaved_transposed)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON(src0->dimension(0) != src1->dimension(1));
-
-        if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
-        {
-            const unsigned int m         = reshape_info.reinterpret_input_as_3d() ? src0->dimension(1) * src0->dimension(2) : src0->dimension(1);
-            const unsigned int n         = src1->dimension(0);
-            const unsigned int src2_dim0 = src2->dimension(0);
-            const unsigned int src2_dim1 = src2->dimension(1);
-
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
-            if(reshape_info.broadcast_bias())
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
-            }
-            else
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
-            }
-        }
-    }
-    else
-    {
-        GEMMRHSMatrixInfo rhs_info;
-        GEMMLHSMatrixInfo lhs_info;
-        const auto m                         = static_cast<int>(reshape_info.m());
-        const auto n                         = static_cast<int>(reshape_info.n());
-        const int  k                         = reshape_info.k();
-        const int  mult_transpose1xW_width   = reshape_info.mult_transpose1xW_width();
-        const int  mult_interleave4x4_height = reshape_info.mult_interleave4x4_height();
-        rhs_info.n0         = max_cl_vector_width / src1->element_size();
-        rhs_info.k0         = 1;
-        rhs_info.h0         = mult_transpose1xW_width;
-        rhs_info.interleave = false;
-        rhs_info.transpose  = false;
-        lhs_info.m0         = 4;
-        lhs_info.k0         = 4;
-        lhs_info.v0         = mult_interleave4x4_height;
-        lhs_info.interleave = true;
-        lhs_info.transpose  = true;
-
-        TensorShape tensor_shape0{ src0->tensor_shape() };
-        tensor_shape0.set(0, k);
-        tensor_shape0.set(1, m);
-
-        TensorShape tensor_shape1{ src1->tensor_shape() };
-        tensor_shape1.set(0, n);
-        tensor_shape1.set(1, k);
-
-        const TensorInfo tensor_info0 = src0->clone()->set_tensor_shape(tensor_shape0);
-        const TensorInfo tensor_info1 = src1->clone()->set_tensor_shape(tensor_shape1);
-
-        const TensorInfo tensor_info_reshaped0 = src0->clone()->set_tensor_shape(misc::shape_calculator::compute_lhs_reshaped_shape(tensor_info0, lhs_info));
-        const TensorInfo tensor_info_reshaped1 = src1->clone()->set_tensor_shape(misc::shape_calculator::compute_rhs_reshaped_shape(tensor_info1, rhs_info));
-
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src0, &tensor_info_reshaped0);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src1, &tensor_info_reshaped1);
-
-        if(src2 != nullptr && !(helpers::float_ops::is_zero(beta)))
-        {
-            const unsigned int src2_dim0 = src2->dimension(0);
-            const unsigned int src2_dim1 = src2->dimension(1);
-
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src2, src1);
-            if(reshape_info.broadcast_bias())
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim1 != 1 || src2_dim0 != n), "Incorrect dimension of bias matrix which is to be broadcasted");
-            }
-            else
-            {
-                ARM_COMPUTE_RETURN_ERROR_ON_MSG((src2_dim0 != n || src2_dim1 != m), "Incorrect dimension of bias matrix");
-            }
-        }
-    }
-
-    if(dst->total_size() != 0)
-    {
-        const TensorInfo tensor_info_dst = dst->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, is_interleaved_transposed, reshape_info));
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(dst, &tensor_info_dst);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, dst);
-    }
-
-    return Status{};
-}
-
-inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float beta, bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, ElementsProcessed &num_elements_processed)
-{
-    ARM_COMPUTE_UNUSED(beta);
-    bool   window_changed = false;
-    Window win{};
-    Window win_out{};
-
-    const DataType data_type                           = src0->data_type();
-    unsigned int  &num_elems_processed_per_iteration_x = num_elements_processed[0];
-    unsigned int  &num_elems_processed_per_iteration_y = num_elements_processed[1];
-    bool           reinterpret_input_as_3d             = reshape_info.reinterpret_input_as_3d();
-    bool           reinterpret_output_as_3d            = (reshape_info.depth_output_gemm3d() != 0);
-
-    // In case both input and dst have to be reinterpreted as 3D tensors,
-    // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
-    if(reinterpret_input_as_3d == reinterpret_output_as_3d)
-    {
-        reinterpret_input_as_3d  = false;
-        reinterpret_output_as_3d = false;
-    }
-
-    // dst tensor auto initialization if not yet initialized
-    auto_init_if_empty(*dst, src0->clone()->set_tensor_shape(misc::shape_calculator::compute_mm_shape(*src0, *src1, is_interleaved_transposed, reshape_info)));
-
-    TensorInfo tmp_info(*dst);
-
-    if(reinterpret_output_as_3d)
-    {
-        // Since the dst tensor has to be reinterpreted as 3D and the execute window is based on a 2D GEMM,
-        // the window needs to be constructed on the 2D collapsed version of the tensor
-        TensorShape tmp_shape(dst->tensor_shape());
-        tmp_shape.collapse(2U, 1U);
-        tmp_info.set_tensor_shape(tmp_shape);
-    }
-
-    if(is_interleaved_transposed)
-    {
-        // reinterpret_input_as_3d is not supported if is_interleaved_transposed is set
-        ARM_COMPUTE_ERROR_ON(reshape_info.reinterpret_input_as_3d());
-
-        // Configure kernel window
-        num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
-        num_elems_processed_per_iteration_y = 4;
-
-        win = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-        if(src2 != nullptr)
-        {
-            const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
-            const int bias_processed_per_iteration_y = reshape_info.broadcast_bias() ? 1 : num_elems_processed_per_iteration_y;
-
-            AccessWindowStatic src2_access(src2, 0, 0,
-                                           ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
-                                           ceil_to_multiple(src2->dimension(1), bias_processed_per_iteration_y));
-
-            window_changed = update_window_and_padding(win, src2_access); // window used by the execute_window_loop
-        }
-    }
-    else // The input tensors have not been reshaped
-    {
-        // Special case for 1xN, 2xN, 3xN and 4xN src0 tensor. num_elems_processed_per_iteration_x is set up for the default case.
-        num_elems_processed_per_iteration_x = max_cl_vector_width / data_size_from_type(data_type);
-        num_elems_processed_per_iteration_y = std::min(static_cast<int>(dst->dimension(1)), 4);
-
-        // Create kernels according to the architecture, data type and input size.
-        GPUTarget arch_target = get_arch_from_target(gpu_target);
-        if(arch_target == GPUTarget::BIFROST && data_type == DataType::F32)
-        {
-            num_elems_processed_per_iteration_x = (src1->dimension(0) <= 1000 && src0->num_dimensions() == 1) ? 2 : 4;
-        }
-
-        // Configure window
-        win     = calculate_max_window(tmp_info, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-        win_out = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
-        AccessWindowStatic src0_access(src0, 0, 0, src0->dimension(0), src0->dimension(1));
-        AccessWindowStatic src1_access(src1, 0, 0, ceil_to_multiple(src1->dimension(0), num_elems_processed_per_iteration_x), src1->dimension(1));
-        AccessWindowStatic dst_access(dst, 0, 0, dst->dimension(0), dst->dimension(1));
-
-        if(src2 != nullptr)
-        {
-            const int bias_processed_per_iteration_x = num_elems_processed_per_iteration_x;
-
-            AccessWindowStatic src2_access(src2, 0, 0,
-                                           ceil_to_multiple(src2->dimension(0), bias_processed_per_iteration_x),
-                                           src2->dimension(1));
-
-            window_changed = update_window_and_padding(win, src0_access, src1_access, src2_access) || // window used by the execute_window_loop
-                             update_window_and_padding(win_out, dst_access);                          // window used to update the padding requirements of dst tensor
-        }
-        else
-        {
-            window_changed = update_window_and_padding(win, src0_access, src1_access) || // window used by the execute_window_loop
                             update_window_and_padding(win_out, dst_access);             // window used to update the padding requirements of dst tensor
-        }
-    }
-
-    // Collapse along the Z direction
-    // This collapse needs to be here in order to tune the Z dimension of LWS
-    Window             collapsed             = win;
-    const unsigned int dimension_to_collapse = std::min(static_cast<unsigned int>(dst->num_dimensions()), 2u);
-    collapsed                                = win.collapse(win, dimension_to_collapse);
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, collapsed);
-}
-} // namespace
-
-ClGemmMatrixMultiplyKernel::ClGemmMatrixMultiplyKernel()
-{
-    _type = CLKernelType::GEMM;
-}
-
-void ClGemmMatrixMultiplyKernel::configure(const CLCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta,
-                                           bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, bool fp_mixed_precision, const ActivationLayerInfo &activation_info)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst);
-
-    // Perform validate step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision));
-
-    auto padding_info = is_interleaved_transposed ? get_padding_info({ src0, src1, dst }) : get_padding_info({ src0, dst });
-
-    _reinterpret_input_as_3d  = reshape_info.reinterpret_input_as_3d();
-    _reinterpret_output_as_3d = (reshape_info.depth_output_gemm3d() != 0);
-    _add_bias                 = src2 != nullptr;
-
-    // In case both input and dst have to be reinterpreted as 3D tensors,
-    // force reinterpret_input_as_3d and reinterpret_output_as_3d to be false.
-    if(_reinterpret_input_as_3d == _reinterpret_output_as_3d)
-    {
-        _reinterpret_input_as_3d  = false;
-        _reinterpret_output_as_3d = false;
-    }
-
-    // Check if we need to slide the matrix B
-    const unsigned int num_dimensions_src0 = _reinterpret_input_as_3d ?
src0->num_dimensions() - 1 : src0->num_dimensions(); - - _slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0); - - const DataType data_type = src0->data_type(); - - // Get target architecture - GPUTarget gpu_target = get_target(); - - ElementsProcessed num_elements_processed{}; - - // Configure kernel window - auto win_config = validate_and_configure_window(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info, - gpu_target, num_elements_processed); - ARM_COMPUTE_ERROR_THROW_ON(win_config.first); - ICLKernel::configure_internal(win_config.second); - - // If _reinterpret_input_as_3d = _reinterpret_output_as_3d = true, both will be turned off (false) - // in which case we will dispatch a batched-GEMM to reduce the complexity of the address calculation within the OpenCL kernel. - // This means that the actual m used by the kernel is given by dst->dimension(1) - const unsigned int internal_m = _reinterpret_output_as_3d ? dst->dimension(1) * dst->dimension(2) : dst->dimension(1); - const unsigned int n = dst->dimension(0); - - const unsigned int h_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(1) : src0->dimension(1); - const unsigned int d_gemm_3d = _reinterpret_output_as_3d ? dst->dimension(2) : src0->dimension(2); - - const unsigned int m0 = num_elements_processed.y(); - const unsigned int n0 = num_elements_processed.x(); - - // Calculate partial (store instead of load) M0 and partial N0 for the partial blocks at the end of a row/column if any. This is to avoid padding. - const unsigned int partial_store_m0 = internal_m % m0; - const unsigned int partial_store_n0 = n % n0; - - // Create build options - CLBuildOptions build_opts; - - build_opts.add_option_if(!(helpers::float_ops::is_one(alpha)), "-DALPHA=" + float_to_string_with_full_precision(alpha)); - build_opts.add_option_if(src2 != nullptr, "-DBETA=" + float_to_string_with_full_precision(beta)); - build_opts.add_option_if(helpers::float_ops::is_one(beta), "-DUNIT_BETA"); - build_opts.add_option_if(reshape_info.broadcast_bias(), "-DBROADCAST_BIAS"); - build_opts.add_option_if(_reinterpret_input_as_3d, "-DREINTERPRET_INPUT_AS_3D"); - build_opts.add_option_if(_reinterpret_output_as_3d, "-DREINTERPRET_OUTPUT_AS_3D"); - build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DHEIGHT_GEMM3D=" + support::cpp11::to_string(h_gemm_3d)); - build_opts.add_option_if(_reinterpret_input_as_3d || _reinterpret_output_as_3d, "-DDEPTH_GEMM3D=" + support::cpp11::to_string(d_gemm_3d)); - build_opts.add_option_if(!_slide_matrix_b, "-DMATRIX_B_DEPTH=" + support::cpp11::to_string(src1->dimension(2))); - build_opts.add_option_if(activation_info.enabled(), "-DACTIVATION_TYPE=" + lower_string(string_from_activation_func(activation_info.activation()))); - build_opts.add_option_if(activation_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(activation_info.a())); - build_opts.add_option_if(activation_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(activation_info.b())); - build_opts.add_option("-DIN1_DIM_X=" + support::cpp11::to_string(src1->dimension(0))); - - const bool is_bifrost = get_arch_from_target(gpu_target) == GPUTarget::BIFROST; - - std::string kernel_name; - if(is_interleaved_transposed) - { - const int mult_transpose1xW_width = reshape_info.mult_transpose1xW_width(); - const int mult_interleave4x4_height = reshape_info.mult_interleave4x4_height(); - - build_opts.add_option("-DM=" + support::cpp11::to_string(internal_m)); - build_opts.add_option("-DN=" + 
support::cpp11::to_string(n)); - build_opts.add_option("-DK=" + support::cpp11::to_string(src1->dimension(0) / (n0 * mult_transpose1xW_width))); - build_opts.add_option("-DH0=" + support::cpp11::to_string(mult_transpose1xW_width)); - build_opts.add_option("-DV0=" + support::cpp11::to_string(mult_interleave4x4_height)); - build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0)); - build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0)); - - if(is_data_type_float(data_type) && is_bifrost) - { - kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type)) + "_bifrost"; - } - else - { - kernel_name = "gemm_mm_interleaved_transposed_" + lower_string(string_from_data_type(data_type)); - if(fp_mixed_precision && data_type == DataType::F16) - { - // currently wider accumulator is only supported for fp16 kernels. - kernel_name += "_acc32"; - } - } - } - else // The input tensors have not been reshaped - { - build_opts.add_option("-DN=" + support::cpp11::to_string(n)); - build_opts.add_option("-DK=" + support::cpp11::to_string(src0->dimension(0))); - build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type)); - build_opts.add_option("-DM0=" + support::cpp11::to_string(m0)); - build_opts.add_option("-DN0=" + support::cpp11::to_string(n0)); - build_opts.add_option("-DPARTIAL_STORE_M0=" + support::cpp11::to_string(partial_store_m0)); - build_opts.add_option("-DPARTIAL_STORE_N0=" + support::cpp11::to_string(partial_store_n0)); - - // Create kernels according to the architecture, data type and input size. - if(is_data_type_float(data_type) && is_bifrost) - { - kernel_name = "gemm_mm_floating_point"; - - if(src0->num_dimensions() != 1) - { - kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost"; - if(fp_mixed_precision && data_type == DataType::F16) - { - // currently wider accumulator is only supported for fp16 kernels. - kernel_name += "_acc32"; - } - } - else if(src1->dimension(0) <= 1000 && data_type == DataType::F32) - { - // The first kernel is optimized for the case of 1000 or less dst elements (e.g. FC8 of AlexNet and VGG-16, and - // FC1 of Inception v3). The second kernel is optimized for the case of greater than 1000 dst elements (e.g. - // FC6 and FC7 of AlexNet and VGG-16). - kernel_name += "_" + lower_string(string_from_data_type(data_type)) + "_bifrost_1000"; - } - - // The work-group size equal to the Bifrost quad size has been proved to be optimal for these kernels - // via exhaustive autotuning over a range of representative layer configurations. - set_lws_hint(cl::NDRange(4)); - } - else // (MIDGARD and F32) or (F16) - { - kernel_name = "gemm_mm_floating_point"; - } - } - // Create kernel - _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); - - // Set config_id for enabling LWS tuning - _config_id = "gemm_"; - _config_id += (is_interleaved_transposed ? "reshaped_" : ""); - _config_id += (_add_bias ? "add_bias_" : ""); - _config_id += (reshape_info.broadcast_bias() ? "broadcast_bias_" : ""); - _config_id += (fp_mixed_precision ? "fp_mixed_" : ""); - _config_id += (_reinterpret_input_as_3d ? "3di_" : ""); - _config_id += (_reinterpret_output_as_3d ? 
"3do_" : ""); - _config_id += lower_string(string_from_data_type(src0->data_type())); - _config_id += "_"; - _config_id += support::cpp11::to_string(dst->dimension(1)); - _config_id += "_"; - _config_id += support::cpp11::to_string(dst->dimension(0)); - _config_id += "_"; - _config_id += support::cpp11::to_string(dst->dimension(2)); - _config_id += "_"; - _config_id += support::cpp11::to_string(dst->dimension(3)); - _config_id += "_"; - _config_id += (is_interleaved_transposed ? support::cpp11::to_string(src1->dimension(0)) : support::cpp11::to_string(src1->dimension(1))); - - ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info)); -} - -Status ClGemmMatrixMultiplyKernel::validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta, - bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision, const ActivationLayerInfo &activation_info) -{ - // Note: num_elements_processed will be set in validate_and_configure_window() - ElementsProcessed num_elements_processed{}; - ARM_COMPUTE_UNUSED(alpha); - ARM_COMPUTE_UNUSED(activation_info); - ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src0, src1, src2, dst, beta, is_interleaved_transposed, reshape_info, fp_mixed_precision)); - ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src0->clone().get(), - src1->clone().get(), - (src2 != nullptr) ? src2->clone().get() : nullptr, - dst->clone().get(), - beta, - is_interleaved_transposed, - reshape_info, - gpu_target, - num_elements_processed) - .first); - - return Status{}; -} - -void ClGemmMatrixMultiplyKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) -{ - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - const auto src0 = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_0)); - const auto src1 = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_1)); - const auto src2 = utils::cast::polymorphic_downcast(tensors.get_const_tensor(TensorType::ACL_SRC_2)); - auto dst = utils::cast::polymorphic_downcast(tensors.get_tensor(TensorType::ACL_DST)); - - ARM_COMPUTE_ERROR_ON_NULLPTR(src0, src1, dst); - ARM_COMPUTE_ERROR_ON(_add_bias && src2 == nullptr); - - if(src1->info()->num_dimensions() < 3) - { - // The stride_z for matrix B must be zero if we do not slice - ARM_COMPUTE_ERROR_ON(src1->info()->strides_in_bytes()[3] != 0); - } - - Window slice = window.first_slice_window_3D(); - Window slice_matrix_b = slice; - - slice_matrix_b.set(Window::DimX, Window::Dimension(0, 1, 1)); - slice_matrix_b.set(Window::DimY, Window::Dimension(0, 1, 1)); - - const unsigned int num_arguments_bias = _add_bias ? num_arguments_per_2D_tensor() + 1 : 0; - - if(_reinterpret_input_as_3d) - { - // Pass bottom paddings to the kernel if the input has to be reinterpreted as 3D tensor - const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + num_arguments_bias; - const unsigned int total_cross_plane_pad = src0->info()->padding().top + src0->info()->padding().bottom; - _kernel.setArg(idx0, static_cast(total_cross_plane_pad)); - } - - if(_reinterpret_output_as_3d) - { - // Pass bottom paddings to the kernel if the dst has to be reinterpreted as 3D tensor - const unsigned int idx0 = 3 * num_arguments_per_2D_tensor() + 3 + (_reinterpret_input_as_3d ? 
1 : 0) + num_arguments_bias; - const unsigned int total_cross_plane_pad = dst->info()->padding().top + dst->info()->padding().bottom; - _kernel.setArg(idx0, static_cast(total_cross_plane_pad)); - } - - do - { - Window slice_b = slice; - // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2 - // This scenario can happen when the matrix multiplication is used to perform a convolution operation - if(!_slide_matrix_b) - { - slice_b = slice_matrix_b; - } - - unsigned int idx = 0; - add_2D_tensor_argument(idx, src0, slice); - add_2D_tensor_argument(idx, src1, slice_b); - if(_add_bias) - { - add_2D_tensor_argument(idx, src2, slice); - } - add_2D_tensor_argument(idx, dst, slice); - _kernel.setArg(idx++, static_cast(src0->info()->strides_in_bytes()[2])); - _kernel.setArg(idx++, static_cast(src1->info()->strides_in_bytes()[2])); - if(_add_bias) - { - _kernel.setArg(idx++, static_cast(src2->info()->strides_in_bytes()[2])); - } - _kernel.setArg(idx++, static_cast(dst->info()->strides_in_bytes()[2])); - enqueue(queue, *this, slice, lws_hint()); - } - while(window.slide_window_slice_3D(slice)); -} -} // namespace kernels -} // namespace opencl -} // namespace arm_compute diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h b/src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h deleted file mode 100644 index c16e3279f5..0000000000 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright (c) 2017-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_KERNEL_H -#define ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_KERNEL_H - -#include "src/core/common/Macros.h" -#include "src/gpu/cl/ClCompileContext.h" -#include "src/gpu/cl/IClKernel.h" - -namespace arm_compute -{ -namespace opencl -{ -namespace kernels -{ -/** OpenCL kernel to multiply two input matrices "A" and "B" and add a martix "C" if provided. All elements of the output matrix will be multiplied by alpha. In case matrix C is passed, it will be added to the previous result. 
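One detail of run_op above deserves a gloss: matrix B only advances with the batch slice when it has at least as many dimensions as (the effectively collapsed) matrix A; otherwise the same B plane is re-used for every batch, which is the GEMM-based-convolution case named in the comment. A standalone sketch of that configure-time decision, with function and parameter names of our own choosing:

// Mirrors _slide_matrix_b = (src1->num_dimensions() >= num_dimensions_src0).
bool should_slide_matrix_b(unsigned int dims_a, unsigned int dims_b, bool a_reinterpreted_as_3d)
{
    // When A is reinterpreted as 3D, one of its dimensions is folded away
    const unsigned int effective_dims_a = a_reinterpreted_as_3d ? dims_a - 1 : dims_a;
    return dims_b >= effective_dims_a;
}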
- * For the matrix C, the broadcast addition is supported if the flag "broadcast_bias" is set in the GEMMReshapeInfo object - * - * @note If the input tensors @p src0 and @p src1 have been reshaped respectively with @ref ClGemmReshapeLhsMatrixKernel" and @ref ClGemmReshapeRhsMatrixKernel, - * the flag @p is_interleaved_transposed must be set to true - * - * @attention @p src1 tensor must have at least 2 dimensions (matrix) - */ -class ClGemmMatrixMultiplyKernel : public IClKernel -{ -public: - ClGemmMatrixMultiplyKernel(); - ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClGemmMatrixMultiplyKernel); - /** Initialise the kernel's input, output and alpha - * - * @param[in] compile_context The compile context to be used. - * @param[in] src0 Input tensor containing the Matrix A. Data types supported: F16/F32 - * @param[in] src1 Input tensor containing the Matrix B. Data type supported: same as @p src0 - * @param[in] src2 Input tensor containing the Matrix C (bias). Can be nullptr. Data type supported: same as @p src0 - * @param[out] dst Output tensor to store the result of matrix multiplication. Data type supported: same as @p src0 - * @param[in] alpha Weight of the matrix product - * @param[in] beta (Optional) Weight of vector C. Default value is 0. Only beta = 1 is currently supported. - * @param[in] is_interleaved_transposed (Optional) True if input0 and input1 have been reshaped respectively using @ref ClGemmReshapeLhsMatrixKernel and @ref ClGemmReshapeRhsMatrixKernel - * @param[in] reshape_info (Optional) GEMM reshape info. If is_interleaved_transposed = true, this object must contain the information to understand how the matrix A and matrix B have been reshaped - * @param[in] fp_mixed_precision (Optional) Use wider accumulators (32 bit instead of 16 for FP16) to improve accuracy - * @param[in] activation_info (Optional) Activation to apply after the matrix multiplication - * - */ - void configure(const ClCompileContext &compile_context, ITensorInfo *src0, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, float alpha, float beta = 0.f, - bool is_interleaved_transposed = true, const GEMMReshapeInfo &reshape_info = GEMMReshapeInfo(), bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo()); - /** Static function to check if given info will lead to a valid configuration - * - * Similar to @ref ClGemmMatrixMultiplyKernel::configure() - * - * @return a status - */ - static Status validate(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, float alpha, float beta, - bool is_interleaved_transposed, const GEMMReshapeInfo &reshape_info, GPUTarget gpu_target, bool fp_mixed_precision = false, const ActivationLayerInfo &activation_info = ActivationLayerInfo()); - - // Inherited methods overridden: - void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override; - -public: - bool _slide_matrix_b{ true }; - bool _reinterpret_input_as_3d{ false }; - bool _reinterpret_output_as_3d{ false }; - bool _add_bias{ false }; -}; -} // namespace kernels -} // namespace opencl -} // namespace arm_compute -#endif /* ARM_COMPUTE_CL_GEMM_MATRIXMULTIPLY_KERNEL_H */ diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp index 448d35353b..6c872fd48c 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.cpp @@ -55,7 +55,7 @@ Status validate_arguments(const 
ITensorInfo *src0, const ITensorInfo *src1, cons { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src0, src1, dst); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F32); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src0, 1, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src0, src1); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->num_dimensions() > 4, "The number of dimensions for the LHS matrix must be <= 4"); ARM_COMPUTE_RETURN_ERROR_ON_MSG(src1->num_dimensions() > 3, "The number of dimensions for the RHS matrix must be <= 3"); diff --git a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h index 26dec918cd..89837cc515 100644 --- a/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h +++ b/src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h @@ -44,7 +44,7 @@ public: /** Initialise the kernel's input and dst. * * @param[in] compile_context The compile context to be used. - * @param[in] src0 Input tensor for the LHS matrix. Data type supported: F32. The number of dimensions for the LHS matrix must be less or equal than 4. + * @param[in] src0 Input tensor for the LHS matrix. Data type supported: F32/F16. The number of dimensions for the LHS matrix must be less than or equal to 4. * @param[in] src1 Input tensor for the RHS matrix. Data type supported: same as @p src0. The number of dimensions for the RHS matrix must be less or equal than 3. * @param[in] src2 Input tensor containing the bias matrix. Data type supported: same as @p src0. * @param[out] dst dst tensor info. Data type supported: same as @p src0 diff --git a/src/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp b/src/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp index b9eac2412e..d74c7fac9b 100644 --- a/src/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp +++ b/src/gpu/cl/kernels/gemm/native/ClGemmDefaultConfigNativeBifrost.cpp @@ -101,7 +101,7 @@ std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> ClGemmDefaultConfigNativeBifrost } else { - return configure_lhs_rhs_info(m, n, 5, 4, 2, 1, 1, false, false, false, false); + return configure_lhs_rhs_info(m, n, 4, 4, 4, 1, 1, false, false, false, false); } } diff --git a/src/gpu/cl/operators/ClFullyConnected.h b/src/gpu/cl/operators/ClFullyConnected.h index dc5f9e5c9b..b5ac70c93b 100644 --- a/src/gpu/cl/operators/ClFullyConnected.h +++ b/src/gpu/cl/operators/ClFullyConnected.h @@ -46,7 +46,7 @@ class ClTranspose; * * -# @ref opencl::kernels::ClIm2ColKernel (called when the input comes from a convolutional layer) * -# @ref CLTranspose (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once) - * -# @ref opencl::kernels::ClGemmMatrixMultiplyKernel or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric) + * -# @ref opencl::ClGemm or @ref CLGEMMLowpMatrixMultiplyCore (if quantized asymmetric) * * @note The fully connected layer accepts "weights" tensors only with 2 dimensions. */
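[Editor's note: the hunks above widen ClGemmMatrixMultiplyNativeKernel from F32-only to F32/F16 and retune the default Bifrost native block sizes to m0 = n0 = k0 = 4. The sketch below shows how a caller could pre-validate an F16 native GEMM against the extended kernel; it is illustrative only — the shapes are arbitrary and the 4/4/4 block sizes merely mirror the Bifrost default above rather than being mandated by the patch.]

// Hypothetical pre-flight check, not part of this patch.
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"

using namespace arm_compute;

Status can_run_f16_native_gemm()
{
    // M = 37, N = 51, K = 23; ACL tensor shapes are (width, height) = (cols, rows).
    const TensorInfo lhs(TensorShape(23U, 37U), 1, DataType::F16);
    const TensorInfo rhs(TensorShape(51U, 23U), 1, DataType::F16);
    const TensorInfo dst(TensorShape(51U, 37U), 1, DataType::F16);

    GEMMLHSMatrixInfo lhs_info{}; // block sizes normally come from the heuristics
    lhs_info.m0 = 4;
    lhs_info.k0 = 4;
    GEMMRHSMatrixInfo rhs_info{};
    rhs_info.n0 = 4;
    rhs_info.k0 = 4; // must match lhs_info.k0 for the native kernel

    GEMMKernelInfo kernel_info{};
    kernel_info.m = 37;
    kernel_info.n = 51;
    kernel_info.k = 23;

    // Same validate() signature that ClGemm::validate_native() uses in this patch.
    return opencl::kernels::ClGemmMatrixMultiplyNativeKernel::validate(
        &lhs, &rhs, nullptr, &dst, 1.0f /* alpha */, 0.0f /* beta */, lhs_info, rhs_info, kernel_info);
}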
diff --git a/src/gpu/cl/operators/ClGemm.cpp b/src/gpu/cl/operators/ClGemm.cpp index 4cd5237b11..d2d0f8f91d 100644 --- a/src/gpu/cl/operators/ClGemm.cpp +++ b/src/gpu/cl/operators/ClGemm.cpp @@ -64,27 +64,14 @@ namespace { inline bool validate_gemm_kernel(CLGEMMKernelType kernel_type) { - switch(kernel_type) - { - case CLGEMMKernelType::NATIVE_V1: - case CLGEMMKernelType::RESHAPED_ONLY_RHS: - case CLGEMMKernelType::RESHAPED_V1: - case CLGEMMKernelType::RESHAPED: - { - return true; - } - default: - { - return false; - } - } + return kernel_type != CLGEMMKernelType::NATIVE; } //Automatically select between mlgo (prioritized) and default heuristics for gemm kernel type inline CLGEMMKernelType auto_select_gemm_kernel(auto_heuristics::CommonQuery query, bool reshape_b_only_on_first_run, bool constant_weights) { if(!constant_weights) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } auto gemm_kernel = auto_heuristics::select_mlgo_gemm_kernel(query, reshape_b_only_on_first_run);
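[Editor's note: the remainder of auto_select_gemm_kernel lies outside this hunk. Its assumed shape, reconstructed here only for context, is: keep the MLGO proposal if validate_gemm_kernel() accepts it, otherwise fall back to the built-in per-GPU heuristics.]

// Assumed continuation (unchanged context, not part of the diff):
if(bool(gemm_kernel) && validate_gemm_kernel(gemm_kernel.gemm_type))
{
    return gemm_kernel.gemm_type; // MLGO proposal maps to a kernel that still exists
}
// The default heuristics always propose one of NATIVE/RESHAPED/RESHAPED_ONLY_RHS.
gemm_kernel = auto_heuristics::select_default_gemm_kernel(query, reshape_b_only_on_first_run);
return gemm_kernel.gemm_type;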
@@ -198,97 +185,54 @@ inline std::pair<GEMMLHSMatrixInfo, GEMMRHSMatrixInfo> auto_select_gemm_config_r } // namespace ClGemm::ClGemm() - : _mm_kernel(std::make_unique<kernels::ClGemmMatrixMultiplyKernel>()), - _reshape_lhs_kernel(std::make_unique<kernels::ClGemmReshapeLhsMatrixKernel>()), + : _reshape_lhs_kernel(std::make_unique<kernels::ClGemmReshapeLhsMatrixKernel>()), _reshape_rhs_kernel(std::make_unique<kernels::ClGemmReshapeRhsMatrixKernel>()), + _mm_native_kernel(std::make_unique<kernels::ClGemmMatrixMultiplyNativeKernel>()), _mm_reshaped_kernel(std::make_unique<kernels::ClGemmMatrixMultiplyReshapedKernel>()), _mm_reshaped_only_rhs_kernel(std::make_unique<kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel>()), _mm_reshaped_only_rhs_fallback_kernel(std::make_unique<kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel>()), _tmp_a(), _tmp_b(), _reshape_b_only_on_first_run(false), - _gemm_kernel_type(CLGEMMKernelType::NATIVE_V1), + _gemm_kernel_type(CLGEMMKernelType::NATIVE), _is_prepared(false), _aux_mem(AuxTensorIdx::Count) { } -void ClGemm::configure_native_v1(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, - const GEMMInfo &gemm_info) +void ClGemm::configure_native(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, + const GEMMInfo &gemm_info) { - const unsigned int m = gemm_info.reinterpret_input_as_3d() ? (a->dimension(1) * a->dimension(2)) : a->dimension(1); - const unsigned int n = b->dimension(0); - const unsigned int k = a->dimension(0); - const GPUTarget gpu_target = CLScheduler::get().target(); - - // Set the target for the kernels - _mm_kernel->set_target(gpu_target); - - GEMMReshapeInfo reshape_info(m, n, k, 1, 1, gemm_info.depth_output_gemm3d(), gemm_info.reinterpret_input_as_3d(), gemm_info.broadcast_bias()); - - // Configure and tune matrix multiply kernel - _mm_kernel->configure(compile_context, a, b, c, output, alpha, beta, false, reshape_info, gemm_info.fp_mixed_precision(), gemm_info.activation_info()); - - // Tune kernel statically - CLScheduler::get().tune_kernel_static(*_mm_kernel); -} + DataType data_type = a->data_type(); + bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); + const unsigned int m = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1); + const unsigned int n = b->dimension(0); + const unsigned int k = a->dimension(0); + const unsigned int batch_size = reinterpret_input_as_3d ? a->dimension(3) : a->dimension(2); + const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); + const GPUTarget gpu_target = CLScheduler::get().target(); + bool broadcast_bias = gemm_info.broadcast_bias(); -void ClGemm::configure_reshaped_v1(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, - const GEMMInfo &gemm_info) -{ - bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); - const unsigned int m = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1); - const unsigned int n = b->dimension(0); - const unsigned int k = a->dimension(0); - const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); - const GPUTarget gpu_target = CLScheduler::get().target(); - int mult_transpose1xW_width = 1; - int mult_interleave4x4_height = 1; + GEMMKernelInfo kernel_info; + kernel_info.m = m; + kernel_info.n = n; + kernel_info.k = k; + kernel_info.depth_output_gemm3d = depth_output_gemm3d; + kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d; + kernel_info.broadcast_bias = broadcast_bias; + kernel_info.activation_info = gemm_info.activation_info(); // Set the target for the kernels - _reshape_lhs_kernel->set_target(gpu_target); - _mm_kernel->set_target(gpu_target); - - if(get_arch_from_target(gpu_target) == GPUTarget::BIFROST) - { - mult_transpose1xW_width = 4; - mult_interleave4x4_height = 2; - } - - GEMMRHSMatrixInfo rhs_info; - rhs_info.n0 = 16 / b->element_size(); - rhs_info.k0 = 1; - rhs_info.h0 = mult_transpose1xW_width; - rhs_info.interleave = false; - rhs_info.transpose = false; - - GEMMLHSMatrixInfo lhs_info; - lhs_info.m0 = 4; - lhs_info.k0 = 4; - lhs_info.v0 = mult_interleave4x4_height; - lhs_info.interleave = true; - lhs_info.transpose = true; + _mm_native_kernel->set_target(gpu_target); - GEMMReshapeInfo reshape_info(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, depth_output_gemm3d, false, gemm_info.broadcast_bias()); - - // Configure interleave kernel - _reshape_lhs_kernel->configure(compile_context, a, &_tmp_a, lhs_info, reinterpret_input_as_3d); - - // Configure transpose kernel - _reshape_rhs_kernel->configure(compile_context, b, &_tmp_b, rhs_info); + auto config = auto_heuristics::select_mlgo_gemm_config_native(auto_heuristics::CommonQuery{ gpu_target, data_type, m, n, k, batch_size }); // Configure and tune matrix multiply kernel - _mm_kernel->configure(compile_context, &_tmp_a, &_tmp_b, c, output, alpha, beta, true, reshape_info, gemm_info.fp_mixed_precision(), gemm_info.activation_info()); - - CLScheduler::get().tune_kernel_static(*_mm_kernel); - - // Request memory for LHS and RHS reshape matrix - _aux_mem[LhsReshape] = MemoryInfo(offset_int_vec(LhsReshape), MemoryLifetime::Temporary, _tmp_a.total_size()); - _aux_mem[RhsReshape] = MemoryInfo(offset_int_vec(RhsReshape), _reshape_b_only_on_first_run ?
MemoryLifetime::Persistent : MemoryLifetime::Temporary, _tmp_b.total_size()); + _mm_native_kernel->configure(compile_context, a, b, c, output, alpha, beta, config.lhs_info, config.rhs_info, kernel_info); } -void ClGemm::configure_reshaped_v2(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, - const GEMMInfo &gemm_info) +void ClGemm::configure_reshaped(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, + const GEMMInfo &gemm_info) { DataType data_type = a->data_type(); bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); @@ -311,7 +255,7 @@ void ClGemm::configure_reshaped_v2(const CLCompileContext &compile_context, ITen // Set the target for the kernels _reshape_lhs_kernel->set_target(gpu_target); - _mm_kernel->set_target(gpu_target); + _mm_reshaped_kernel->set_target(gpu_target); GEMMLHSMatrixInfo lhs_info{}; GEMMRHSMatrixInfo rhs_info{}; @@ -354,7 +298,8 @@ void ClGemm::configure_reshaped_only_rhs(const CLCompileContext &compile_context kernel_info.activation_info = gemm_info.activation_info(); // Set the target for the kernels - _mm_kernel->set_target(gpu_target); + _mm_reshaped_only_rhs_kernel->set_target(gpu_target); + _mm_reshaped_only_rhs_fallback_kernel->set_target(gpu_target); GEMMLHSMatrixInfo lhs_info{}; GEMMRHSMatrixInfo rhs_info{}; @@ -381,78 +326,35 @@ void ClGemm::configure_reshaped_only_rhs(const CLCompileContext &compile_context _aux_mem[RhsReshape] = MemoryInfo(offset_int_vec(RhsReshape), _reshape_b_only_on_first_run ? MemoryLifetime::Persistent : MemoryLifetime::Temporary, _tmp_b.total_size()); } -Status ClGemm::validate_native_v1(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info) +Status ClGemm::validate_native(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info) { ARM_COMPUTE_UNUSED(alpha); ARM_COMPUTE_UNUSED(output); // Get the GPU target const GPUTarget gpu_target = CLScheduler::get().target(); + DataType data_type = a->data_type(); bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); const unsigned int m = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1); const unsigned int n = b->dimension(0); const unsigned int k = a->dimension(0); + const unsigned int batch_size = reinterpret_input_as_3d ? 
a->dimension(3) : a->dimension(2); const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); + const bool broadcast_bias = gemm_info.broadcast_bias(); - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, 1, 1, depth_output_gemm3d, reinterpret_input_as_3d, gemm_info.broadcast_bias()); - - // Validate matrix multiply - ARM_COMPUTE_RETURN_ON_ERROR(ClGemmMatrixMultiplyKernel::validate(a, b, c, output, alpha, beta, - false, reshape_info, gpu_target, gemm_info.fp_mixed_precision(), gemm_info.activation_info())); - - return Status{}; -} - -Status ClGemm::validate_reshaped_v1(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info) -{ - ARM_COMPUTE_UNUSED(alpha); - ARM_COMPUTE_UNUSED(output); - - TensorInfo tmp_a_info{}; - TensorInfo tmp_b_info{}; - - // Get the GPU target - const GPUTarget gpu_target = CLScheduler::get().target(); - const unsigned int m = gemm_info.reinterpret_input_as_3d() ? (a->dimension(1) * a->dimension(2)) : a->dimension(1); - const unsigned int n = b->dimension(0); - const unsigned int k = a->dimension(0); - int mult_transpose1xW_width = 1; - int mult_interleave4x4_height = 1; - const int depth_output_gemm3d = gemm_info.depth_output_gemm3d(); - - if(get_arch_from_target(gpu_target) == GPUTarget::BIFROST) - { - mult_transpose1xW_width = 4; - mult_interleave4x4_height = 2; - } - - GEMMRHSMatrixInfo rhs_info; - rhs_info.n0 = 16 / b->element_size(); - rhs_info.k0 = 1; - rhs_info.h0 = mult_transpose1xW_width; - rhs_info.interleave = false; - rhs_info.transpose = false; - - GEMMLHSMatrixInfo lhs_info; - lhs_info.m0 = 4; - lhs_info.k0 = 4; - lhs_info.v0 = mult_interleave4x4_height; - lhs_info.interleave = true; - lhs_info.transpose = true; - - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(m, n, k, mult_transpose1xW_width, mult_interleave4x4_height, depth_output_gemm3d, false, gemm_info.broadcast_bias()); - - // Validate interleave kernel - auto_init_if_empty(tmp_a_info, a->clone()->set_tensor_shape(compute_lhs_reshaped_shape(*a, lhs_info, gemm_info.reinterpret_input_as_3d()))); - ARM_COMPUTE_RETURN_ON_ERROR(ClGemmReshapeLhsMatrixKernel::validate(a, &tmp_a_info, lhs_info, gemm_info.reinterpret_input_as_3d())); + GEMMKernelInfo kernel_info; + kernel_info.m = m; + kernel_info.n = n; + kernel_info.k = k; + kernel_info.depth_output_gemm3d = depth_output_gemm3d; + kernel_info.reinterpret_input_as_3d = reinterpret_input_as_3d; + kernel_info.broadcast_bias = broadcast_bias; + kernel_info.activation_info = gemm_info.activation_info(); - // Validate transpose kernel - auto_init_if_empty(tmp_b_info, b->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*b, rhs_info))); - ARM_COMPUTE_RETURN_ON_ERROR(ClGemmReshapeRhsMatrixKernel::validate(b, &tmp_b_info, rhs_info)); + auto config = auto_heuristics::select_mlgo_gemm_config_native(auto_heuristics::CommonQuery{ gpu_target, data_type, m, n, k, batch_size }); // Validate matrix multiply - ARM_COMPUTE_RETURN_ON_ERROR(ClGemmMatrixMultiplyKernel::validate(&tmp_a_info, &tmp_b_info, c, output, alpha, beta, - true, reshape_info, gpu_target, gemm_info.fp_mixed_precision(), gemm_info.activation_info())); + ARM_COMPUTE_RETURN_ON_ERROR(ClGemmMatrixMultiplyNativeKernel::validate(a, b, c, output, alpha, beta, config.lhs_info, config.rhs_info, kernel_info)); return Status{}; } @@ -583,19 +485,14 @@ void ClGemm::configure(const CLCompileContext &compile_context, ITensorInfo *a, switch(_gemm_kernel_type) { - case
CLGEMMKernelType::NATIVE_V1: - { - configure_native_v1(compile_context, a, b, c_to_use, output, alpha, beta, gemm_info); - break; - } - case CLGEMMKernelType::RESHAPED_V1: + case CLGEMMKernelType::NATIVE: { - configure_reshaped_v1(compile_context, a, b, c_to_use, output, alpha, beta, gemm_info); + configure_native(compile_context, a, b, c_to_use, output, alpha, beta, gemm_info); break; } case CLGEMMKernelType::RESHAPED: { - configure_reshaped_v2(compile_context, a, b, c_to_use, output, alpha, beta, gemm_info); + configure_reshaped(compile_context, a, b, c_to_use, output, alpha, beta, gemm_info); break; } case CLGEMMKernelType::RESHAPED_ONLY_RHS: @@ -632,14 +529,9 @@ Status ClGemm::validate(const ITensorInfo *a, const ITensorInfo *b, const ITenso switch(gemm_kernel_type) { - case CLGEMMKernelType::NATIVE_V1: + case CLGEMMKernelType::NATIVE: { - ARM_COMPUTE_RETURN_ON_ERROR(validate_native_v1(a, b, c_to_use, output, alpha, beta, gemm_info)); - break; - } - case CLGEMMKernelType::RESHAPED_V1: - { - ARM_COMPUTE_RETURN_ON_ERROR(validate_reshaped_v1(a, b, c_to_use, output, alpha, beta, gemm_info)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_native(a, b, c_to_use, output, alpha, beta, gemm_info)); break; } case CLGEMMKernelType::RESHAPED: @@ -679,12 +571,11 @@ void ClGemm::run(ITensorPack &tensors) // Run matrix multiply kernel switch(_gemm_kernel_type) { - case CLGEMMKernelType::NATIVE_V1: + case CLGEMMKernelType::NATIVE: { - CLScheduler::get().enqueue_op(*_mm_kernel, tensors, true); + CLScheduler::get().enqueue_op(*_mm_native_kernel, tensors, true); break; } - case CLGEMMKernelType::RESHAPED_V1: case CLGEMMKernelType::RESHAPED: { // Run interleave kernel @@ -704,10 +595,6 @@ void ClGemm::run(ITensorPack &tensors) { CLScheduler::get().enqueue_op(*_mm_reshaped_kernel, gemm_reshaped_pack, true); } - else - { - CLScheduler::get().enqueue_op(*_mm_kernel, gemm_reshaped_pack, true); - } break; } case CLGEMMKernelType::RESHAPED_ONLY_RHS: diff --git a/src/gpu/cl/operators/ClGemm.h b/src/gpu/cl/operators/ClGemm.h index 60bb78c371..fd53648b3c 100644 --- a/src/gpu/cl/operators/ClGemm.h +++ b/src/gpu/cl/operators/ClGemm.h @@ -31,7 +31,6 @@ #include "src/gpu/cl/ClCompileContext.h" #include "src/gpu/cl/IClKernel.h" #include "src/gpu/cl/IClOperator.h" -#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h" #include "src/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h" #include "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h" #include "src/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h" @@ -46,10 +45,10 @@ namespace opencl { /** Basic function to execute GEMM on OpenCL. 
This function calls the following OpenCL kernels: * - * -# @ref kernels::ClGemmReshapeLhsMatrixKernel (only if the RESHAPED_V1 is selected by the heuristic model) - * -# @ref kernels::ClGemmReshapeRhsMatrixKernel (only if either the RESHAPED_V1 or RESHAPED_ONLY_RHS is selected by the select_gemm_kernel method()) - * -# @ref kernels::ClGemmMatrixMultiplyKernel (only if either the NATIVE or RESHAPED_V1 is selected by the select_gemm_kernel method()) - * -# @ref kernels::ClGemmMatrixMultiplyReshapedKernel (only if RESHAPED_V1 is selected by the select_gemm_kernel method()) + * -# @ref kernels::ClGemmReshapeLhsMatrixKernel (only if RESHAPED is selected by the heuristic model) + * -# @ref kernels::ClGemmReshapeRhsMatrixKernel (only if either the RESHAPED or RESHAPED_ONLY_RHS is selected by the select_gemm_kernel method()) + * -# @ref kernels::ClGemmMatrixMultiplyNativeKernel (only if NATIVE is selected by the select_gemm_kernel method()) + * -# @ref kernels::ClGemmMatrixMultiplyReshapedKernel (only if RESHAPED is selected by the select_gemm_kernel method()) * -# @ref kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel (only if RESHAPED_ONLY_RHS is selected by the select_gemm_kernel method()) */ class ClGemm : public IClOperator @@ -100,13 +99,11 @@ public: experimental::MemoryRequirements workspace() const override; private: - void configure_native_v1(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); - void configure_reshaped_v1(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); - void configure_reshaped_v2(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); + void configure_native(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); + void configure_reshaped(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); void configure_reshaped_only_rhs(const CLCompileContext &compile_context, ITensorInfo *a, ITensorInfo *b, ITensorInfo *c, ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); - static Status validate_native_v1(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); - static Status validate_reshaped_v1(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); + static Status validate_native(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); static Status validate_reshaped(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); static Status validate_reshaped_only_rhs(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &gemm_info); @@ -119,9 +116,9 @@ private: }; private: - std::unique_ptr<kernels::ClGemmMatrixMultiplyKernel> _mm_kernel; std::unique_ptr<kernels::ClGemmReshapeLhsMatrixKernel> _reshape_lhs_kernel; std::unique_ptr<kernels::ClGemmReshapeRhsMatrixKernel> _reshape_rhs_kernel; + std::unique_ptr<kernels::ClGemmMatrixMultiplyNativeKernel> _mm_native_kernel; std::unique_ptr<kernels::ClGemmMatrixMultiplyReshapedKernel> _mm_reshaped_kernel; std::unique_ptr<kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel> _mm_reshaped_only_rhs_kernel; std::unique_ptr<kernels::ClGemmMatrixMultiplyReshapedOnlyRhsKernel> _mm_reshaped_only_rhs_fallback_kernel; diff --git a/src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp b/src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp index 6fd7e52d5d..7a62186677 100644 --- a/src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp +++ b/src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.cpp @@ -239,7 +239,6 @@ void ClGemmLowpMatrixMultiplyCore::configure(const CLCompileContext &compile_con GEMMLHSMatrixInfo lhs_info; // Arguments used by GEMMReshapeInfo - // If we pass the matrix A and matrix B reshaped to CLGEMMMatrixMultiplyKernel, we need to pass m, n, k, mult_transpose1xW_width and mult_interleave4x4_height to CLGEMMReshapeInfo // in order to know how the matrices have been reshaped bool reinterpret_input_as_3d = gemm_info.reinterpret_input_as_3d(); const unsigned int m = reinterpret_input_as_3d ? (a->dimension(1) * a->dimension(2)) : a->dimension(1); diff --git a/src/runtime/CL/gemm/CLGEMMDefaultTypeBifrost.cpp b/src/runtime/CL/gemm/CLGEMMDefaultTypeBifrost.cpp index 67253c7277..18ade97885 100644 --- a/src/runtime/CL/gemm/CLGEMMDefaultTypeBifrost.cpp +++ b/src/runtime/CL/gemm/CLGEMMDefaultTypeBifrost.cpp @@ -125,13 +125,13 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::default_f32(unsigned int m, unsigned { ARM_COMPUTE_UNUSED(b); - CLGEMMKernelType gemm_type = CLGEMMKernelType::NATIVE_V1; + CLGEMMKernelType gemm_type = CLGEMMKernelType::NATIVE; if(is_rhs_constant) { if((m > 1) && (n < 16)) { - gemm_type = CLGEMMKernelType::RESHAPED_V1; + gemm_type = CLGEMMKernelType::RESHAPED; } else if(m == 1) { @@ -146,17 +146,17 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::default_f32(unsigned int m, unsigned constexpr float fact1 = 1.66f; constexpr float ops = 12.0f; const float scale = k > 1024 ? 1.07f : 1.0f; - gemm_type = (alpha + ((n * fact0) / ops) < ((fact1 * n * scale) / ops)) ? CLGEMMKernelType::RESHAPED_V1 : CLGEMMKernelType::NATIVE_V1; + gemm_type = (alpha + ((n * fact0) / ops) < ((fact1 * n * scale) / ops)) ? CLGEMMKernelType::RESHAPED : CLGEMMKernelType::RESHAPED_ONLY_RHS; } else { - gemm_type = CLGEMMKernelType::NATIVE_V1; + gemm_type = CLGEMMKernelType::RESHAPED_ONLY_RHS; } } const auto workload = static_cast<float>((m * n) / 20.0f); - gemm_type = ((workload > 1600.0f) && (gemm_type == CLGEMMKernelType::RESHAPED_V1)) ? CLGEMMKernelType::RESHAPED : gemm_type; + gemm_type = ((workload > 1600.0f) && (gemm_type == CLGEMMKernelType::RESHAPED)) ?
CLGEMMKernelType::RESHAPED : gemm_type; } return gemm_type; @@ -179,7 +179,7 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::default_f16(unsigned int m, unsigned } else { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } } @@ -203,7 +203,7 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::g76_f32(unsigned int m, unsigned int if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } if(m == 1) { @@ -260,7 +260,7 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::g52_f32(unsigned int m, unsigned int if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } if(m == 1) @@ -387,7 +387,7 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::g76_f16(unsigned int m, unsigned int if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } if(m == 1) @@ -447,7 +447,7 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::g52_f16(unsigned int m, unsigned int { if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } if(m == 1) @@ -559,19 +559,14 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::g52_f16(unsigned int m, unsigned int CLGEMMKernelType CLGEMMDefaultTypeBifrost::g71_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant) { ARM_COMPUTE_UNUSED(b); + ARM_COMPUTE_UNUSED(n); + ARM_COMPUTE_UNUSED(k); if(is_rhs_constant) { if(m == 1) { - if(n > k) - { - return CLGEMMKernelType::NATIVE_V1; - } - else - { - return CLGEMMKernelType::RESHAPED_ONLY_RHS; - } + return CLGEMMKernelType::RESHAPED_ONLY_RHS; } else { @@ -580,7 +575,7 @@ CLGEMMKernelType CLGEMMDefaultTypeBifrost::g71_f16(unsigned int m, unsigned int } else { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } } } // namespace cl_gemm diff --git a/src/runtime/CL/gemm/CLGEMMDefaultTypeMidgard.cpp b/src/runtime/CL/gemm/CLGEMMDefaultTypeMidgard.cpp index a64de9952e..ef30b28f96 100644 --- a/src/runtime/CL/gemm/CLGEMMDefaultTypeMidgard.cpp +++ b/src/runtime/CL/gemm/CLGEMMDefaultTypeMidgard.cpp @@ -73,7 +73,7 @@ CLGEMMKernelType CLGEMMDefaultTypeMidgard::default_f32(unsigned int m, unsigned ARM_COMPUTE_UNUSED(n, k, b); // We reshape the matrices only if we do not have the vector-by-matrix case and we reshape the matrix B only once - return ((m != 1) && is_rhs_constant) ? CLGEMMKernelType::RESHAPED_V1 : CLGEMMKernelType::NATIVE_V1; + return ((m != 1) && is_rhs_constant) ? CLGEMMKernelType::RESHAPED : CLGEMMKernelType::NATIVE; } CLGEMMKernelType CLGEMMDefaultTypeMidgard::default_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant) @@ -81,7 +81,7 @@ CLGEMMKernelType CLGEMMDefaultTypeMidgard::default_f16(unsigned int m, unsigned ARM_COMPUTE_UNUSED(n, k, b); // We reshape the matrices only if we do not have the vector-by-matrix case and we reshape the matrix B only once - return ((m != 1) && is_rhs_constant) ? CLGEMMKernelType::RESHAPED_V1 : CLGEMMKernelType::NATIVE_V1; + return ((m != 1) && is_rhs_constant) ? 
CLGEMMKernelType::RESHAPED : CLGEMMKernelType::NATIVE; } CLGEMMKernelType CLGEMMDefaultTypeMidgard::default_q8(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant) diff --git a/src/runtime/CL/gemm/CLGEMMDefaultTypeValhall.cpp b/src/runtime/CL/gemm/CLGEMMDefaultTypeValhall.cpp index b3403b2aaf..64271a8801 100644 --- a/src/runtime/CL/gemm/CLGEMMDefaultTypeValhall.cpp +++ b/src/runtime/CL/gemm/CLGEMMDefaultTypeValhall.cpp @@ -108,21 +108,21 @@ CLGEMMKernelType CLGEMMDefaultTypeValhall::default_f32(unsigned int m, unsigned { ARM_COMPUTE_UNUSED(m, n, k, b); - return is_rhs_constant ? CLGEMMKernelType::RESHAPED_ONLY_RHS : CLGEMMKernelType::NATIVE_V1; + return is_rhs_constant ? CLGEMMKernelType::RESHAPED_ONLY_RHS : CLGEMMKernelType::NATIVE; } CLGEMMKernelType CLGEMMDefaultTypeValhall::default_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant) { ARM_COMPUTE_UNUSED(m, n, k, b); - return is_rhs_constant ? CLGEMMKernelType::RESHAPED_ONLY_RHS : CLGEMMKernelType::NATIVE_V1; + return is_rhs_constant ? CLGEMMKernelType::RESHAPED_ONLY_RHS : CLGEMMKernelType::NATIVE; } CLGEMMKernelType CLGEMMDefaultTypeValhall::g77_f16(unsigned int m, unsigned int n, unsigned int k, unsigned int b, bool is_rhs_constant) { if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } if(m == 1) @@ -242,7 +242,7 @@ CLGEMMKernelType CLGEMMDefaultTypeValhall::g78_f32(unsigned int m, unsigned int if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } if(m == 1) @@ -301,7 +301,7 @@ CLGEMMKernelType CLGEMMDefaultTypeValhall::g78_f16(unsigned int m, unsigned int if(!is_rhs_constant) { - return CLGEMMKernelType::NATIVE_V1; + return CLGEMMKernelType::NATIVE; } return CLGEMMKernelType::RESHAPED_ONLY_RHS; diff --git a/tests/validate_examples/cl_gemm.cpp b/tests/validate_examples/cl_gemm.cpp index 717ba77e17..82dfc053db 100644 --- a/tests/validate_examples/cl_gemm.cpp +++ b/tests/validate_examples/cl_gemm.cpp @@ -39,7 +39,6 @@ #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h" #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h" #include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h" -#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h" #include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h" #include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h" #include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h" diff --git a/tests/validation/CL/GEMMMatrixMultiply.cpp b/tests/validation/CL/GEMMMatrixMultiply.cpp deleted file mode 100644 index faa2413489..0000000000 --- a/tests/validation/CL/GEMMMatrixMultiply.cpp +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Copyright (c) 2019-2021 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/core/KernelDescriptors.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/CL/CLTensorAllocator.h" -#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h" -#include "tests/CL/CLAccessor.h" -#include "tests/CL/Helper.h" -#include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/GEMMFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -using namespace arm_compute::misc::shape_calculator; -using namespace arm_compute::opencl::kernels; - -// Create function for CLGEMMMatrixMultiplyKernel -using CLGEMMMatrixMultiplyNative = CLSynthetizeOperator; - -// Fixture for GEMMMatrixMultiplyValidationFixture -template -using CLGEMMMatrixMultiplyNativeFixture = GEMMMatrixMultiplyValidationFixture; - -// Fixture for GEMMMatrixMultiply3DValidationFixture -template -using CLGEMMMatrixMultiplyNative3DFixture = GEMMMatrixMultiply3DValidationFixture; - -namespace -{ -// *INDENT-OFF* -// clang-format off -RelativeTolerance rel_tolerance_f32(0.001f); -constexpr float abs_tolerance_f32(0.0001f); - -RelativeTolerance rel_tolerance_f16(half(0.2)); -constexpr float tolerance_num_f16 = 0.02f; - -/** Alpha values to test */ -const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} ); - -/** Beta values to test */ -const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} ); - -/** M, N combinations to test - * 1: Special 1x1 case - * 2: Special multples of processor size in both dimensions - * 3: Non multiples of processor size in both dimensions - * 4: Special 1x1003 case -*/ -const auto m_n_values = zip( - framework::dataset::make("M", {1, 16, 37, 1}), - framework::dataset::make("N", {1, 16, 51, 1003}) - ); - -/** N values to test */ -const auto n_values = framework::dataset::make("N", {51, 1003}); - -/** K values to test */ -const auto k_values = framework::dataset::make("K", 23); - -/** M_W values to test */ -const auto m_w_values = framework::dataset::make("M_W", 5); - -/** M_H values to test */ -const auto m_h_values = framework::dataset::make("M_H", 7); - -/** Batch size values to test */ -const auto b_values = framework::dataset::make("batch_size", 1, 3); - -/** Activation values to test */ -const auto act_values = framework::dataset::make("Activation", -{ - ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f), -}); - -/** Broadcast bias from vector to matrix */ -const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", { false, true } ); - -/** GPU architectures values to test */ -const auto gpu_arch_values = framework::dataset::make("GPUArch", -{ - GPUTarget::MIDGARD, - GPUTarget::BIFROST -}); - -/** Data types 
values to test in the configuration */ -const auto data_type_values = framework::dataset::make("DataType", -{ - DataType::F32, - DataType::F16 -}); - -/** M values to test */ -const auto fp16_mixed_precision_values = framework::dataset::make("fp16_mixed_precision", {true, false}); -} // namespace - -TEST_SUITE(CL) -TEST_SUITE(GEMMMatrixMultiply) -TEST_CASE(Negative, framework::DatasetMode::ALL) -{ - // Unsupported QASYMM8 data type - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U), 1, DataType::QASYMM8); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::QASYMM8); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::QASYMM8); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Unsupported SIZE_T data type - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U), 1, DataType::SIZET); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::SIZET); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::SIZET); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Mixed precision with F32 - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = true; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Max number of dimensions LHS matrix - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U, 4U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Max 
number of dimensions RHS matrix - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 4U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 4U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 4U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Broadcast bias - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U), 1, DataType::F16); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::F16); - // The correct shape should be bias = TensorInfo(TensorShape(14U, 1U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F16); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F16); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, true); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Invalid dimensions for the bias - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::F32); - // The correct shape should be bias = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(14U, 8U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Invalid dimensions for the output - { - const auto lhs = TensorInfo(TensorShape(13U, 12U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(14U, 13U, 1U, 1U), 1, DataType::F32); - // The correct shape should be out = TensorInfo(TensorShape(14U, 12U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(14U, 7U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = false; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target); - 
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } -} - -TEST_SUITE(Float) -TEST_SUITE(FP32) - -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_n_values, - k_values), - b_values), - alpha_values), - beta_values), - broadcast_bias_values), - framework::dataset::make("fp16_mixed_precision", false)), - act_values), - framework::dataset::make("DataType", DataType::F32)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); -} - -FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_w_values, - m_h_values), - n_values), - k_values), - b_values), - alpha_values), - beta_values), - broadcast_bias_values), - framework::dataset::make("fp16_mixed_precision", false)), - act_values), - framework::dataset::make("DataType", DataType::F32)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); -} - -TEST_SUITE_END() // FP32 - -TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyNativeFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_n_values, - k_values), - b_values), - alpha_values), - beta_values), - broadcast_bias_values), - fp16_mixed_precision_values), - act_values), - framework::dataset::make("DataType", DataType::F16)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); -} - -FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyNative3DFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_w_values, - m_h_values), - n_values), - k_values), - b_values), - alpha_values), - beta_values), - broadcast_bias_values), - fp16_mixed_precision_values), - act_values), - framework::dataset::make("DataType", DataType::F16)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); -} - -TEST_SUITE_END() // FP16 -TEST_SUITE_END() // Float -TEST_SUITE_END() // GEMMMatrixMuliplty -TEST_SUITE_END() // CL -} // namespace validation -} // namespace test -} // namespace arm_compute \ No newline at end of file diff --git a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp deleted file mode 100644 index 9313ae34d6..0000000000 --- a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp +++ /dev/null @@ -1,334 +0,0 @@ -/* - * Copyright (c) 2019-2021 Arm Limited. 
- * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#include "arm_compute/core/KernelDescriptors.h" -#include "arm_compute/core/Types.h" -#include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/runtime/CL/CLTensor.h" -#include "arm_compute/runtime/CL/CLTensorAllocator.h" -#include "src/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h" -#include "src/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h" -#include "src/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h" -#include "tests/CL/CLAccessor.h" -#include "tests/CL/Helper.h" -#include "tests/PaddingCalculator.h" -#include "tests/datasets/ShapeDatasets.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" -#include "tests/validation/fixtures/GEMMFixture.h" - -namespace arm_compute -{ -namespace test -{ -namespace validation -{ -using namespace arm_compute::misc::shape_calculator; -using namespace arm_compute::opencl::kernels; - -// Create function for ClGemmReshapeLhsMatrixKernel -using CLGEMMReshapeLHSMatrix = CLSynthetizeOperator; - -// Create function for ClGemmReshapeRhsMatrixKernel -using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator; - -// Create function for ClGemmMatrixMultiplyKernel -using CLGEMMMatrixMultiplyReshaped = CLSynthetizeOperator; - -// Fixture for GEMMMatrixMultiplyInterleavedTransposedValidationFixture -template -using CLGEMMMatrixMultiplyReshapedFixture = - GEMMMatrixMultiplyInterleavedTransposedValidationFixture; - -// Fixture for GEMMMatrixMultiplyInterleavedTransposed3DValidationFixture -template -using CLGEMMMatrixMultiplyReshaped3DFixture = - GEMMMatrixMultiplyInterleavedTransposed3DValidationFixture; - -namespace -{ -// *INDENT-OFF* -// clang-format off -RelativeTolerance rel_tolerance_f32(0.001f); -constexpr float abs_tolerance_f32(0.0001f); - -RelativeTolerance rel_tolerance_f16(half(0.2)); -constexpr float tolerance_num_f16 = 0.02f; - -/** Alpha values to test */ -const auto alpha_values = framework::dataset::make("alpha", {1.0f, -0.75f} ); - -/** Beta values to test */ -const auto beta_values = framework::dataset::make("beta", {-0.35f, 0.0f} ); - -/** M, N combinations to test - * 1: Special 1x1 case - * 2: Special multples of processor size in both dimensions - * 3: Non multiples of processor size in both dimensions -*/ -const auto m_n_values = zip( - framework::dataset::make("M", {1, 16, 37}), - 
framework::dataset::make("N", {1, 16, 51}) - ); - -/** N values to test */ -const auto n_values = framework::dataset::make("N", 51); - -/** K values to test */ -const auto k_values = framework::dataset::make("K", 23); - -/** M_W values to test */ -const auto m_w_values = framework::dataset::make("M_W", 5); - -/** M_H values to test */ -const auto m_h_values = framework::dataset::make("M_H", 7); - -/** Batch size values to test */ -const auto b_values = framework::dataset::make("batch_size", 1, 3); - -/** Activation values to test */ -const auto act_values = framework::dataset::make("Activation", -{ - ActivationLayerInfo(), - ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, 2.f), -}); - -/** V0 values to test */ -const auto v0_values = framework::dataset::make("V0", 2); - -/** H0 values to test */ -const auto h0_values = framework::dataset::make("H0", 4); - -/** Broadcast bias from vector to matrix */ -const auto broadcast_bias_values = framework::dataset::make("broadcast_bias", {false, true} ); - -/** GPU architectures values to test */ -const auto gpu_arch_values = framework::dataset::make("GPUArch", -{ - GPUTarget::MIDGARD, - GPUTarget::BIFROST -}); - -/** Data types values to test in the configuration */ -const auto data_type_values = framework::dataset::make("DataType", -{ - DataType::F32, - DataType::F16 -}); - -/** M values to test */ -const auto fp16_mixed_precision_values = framework::dataset::make("fp16_mixed_precision", {true, false}); -} // namespace - -TEST_SUITE(CL) -TEST_SUITE(GEMMMatrixMultiplyInterleavedTransposed) -TEST_CASE(Negative, framework::DatasetMode::ALL) -{ - // The following tests are already integrated in the GEMMMatrixMultiply validation because - // in common with this validation - // - Unsupported QASYMM8 data type - // - Unsupported SIZE_T data type - // - Mixed precision with F32 - // - Max number of dimensions LHS matrix - // - Max number of dimensions RHS matrix - - // Invalid LHS dimensions - { - // The correct shape should be: lhs = TensorInfo(TensorShape(256U, 1U, 1U, 1U), 1, DataType::F32); - const auto lhs = TensorInfo(TensorShape(256U, 2U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(104U, 3U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = true; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Invalid RHS dimensions - { - const auto lhs = TensorInfo(TensorShape(256U, 1U, 1U, 1U), 1, DataType::F32); - // The correct shape should be rhs = TensorInfo(TensorShape(104U, 3U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(104U, 4U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = true; - const GEMMReshapeInfo 
reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Broadcast bias - { - const auto lhs = TensorInfo(TensorShape(256U, 1U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(104U, 3U, 1U, 1U), 1, DataType::F32); - // The correct shape should be bias = TensorInfo(TensorShape(24U, 1U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = true; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, true); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Invalid dimensions for the bias - { - const auto lhs = TensorInfo(TensorShape(256U, 1U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(104U, 3U, 1U, 1U), 1, DataType::F32); - // The correct shape should be bias = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(25U, 16U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = true; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } - - // Invalid dimensions for the output - { - const auto lhs = TensorInfo(TensorShape(256U, 1U, 1U, 1U), 1, DataType::F32); - const auto rhs = TensorInfo(TensorShape(104U, 3U, 1U, 1U), 1, DataType::F32); - const auto bias = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - // The correct shape should be out = TensorInfo(TensorShape(24U, 16U, 1U, 1U), 1, DataType::F32); - const auto out = TensorInfo(TensorShape(24U, 13U, 1U, 1U), 1, DataType::F32); - constexpr float alpha = 1.3f; - constexpr float beta = 0.7f; - const bool is_interleaved_transposed = true; - const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false); - const GPUTarget gpu_target = GPUTarget::MIDGARD; - const bool fp_mixed_precision = false; - const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision); - ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS); - } -} - -TEST_SUITE(Float) -TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture, framework::DatasetMode::ALL, - 
combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_n_values, - k_values), - b_values), - alpha_values), - beta_values), - v0_values), - h0_values), - broadcast_bias_values), - framework::dataset::make("fp16_mixed_precision", false)), - act_values), - framework::dataset::make("DataType", DataType::F32)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); -} - -FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_w_values, - m_h_values), - n_values), - k_values), - b_values), - alpha_values), - beta_values), - v0_values), - h0_values), - broadcast_bias_values), - framework::dataset::make("fp16_mixed_precision", false)), - act_values), - framework::dataset::make("DataType", DataType::F32)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0.f, abs_tolerance_f32); -} - -TEST_SUITE_END() // FP32 - -TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMMatrixMultiplyReshapedFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_n_values, - k_values), - b_values), - alpha_values), - beta_values), - v0_values), - h0_values), - broadcast_bias_values), - fp16_mixed_precision_values), - act_values), - framework::dataset::make("DataType", DataType::F16)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); -} - -FIXTURE_DATA_TEST_CASE(RunSmall3D, CLGEMMMatrixMultiplyReshaped3DFixture, framework::DatasetMode::ALL, - combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( - m_w_values, - m_h_values), - n_values), - k_values), - b_values), - alpha_values), - beta_values), - v0_values), - h0_values), - broadcast_bias_values), - fp16_mixed_precision_values), - act_values), - framework::dataset::make("DataType", DataType::F16)), - gpu_arch_values)) -{ - // Validate output - validate(CLAccessor(_target), _reference, rel_tolerance_f16, tolerance_num_f16); -} - -TEST_SUITE_END() // FP16 -TEST_SUITE_END() // Float -TEST_SUITE_END() // GEMMMatrixMulipltyInterleavedTransposed -TEST_SUITE_END() // CL -} // namespace validation -} // namespace test -} // namespace arm_compute \ No newline at end of file diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h index 3e73b906db..220c3ac189 100644 --- a/utils/TypePrinter.h +++ b/utils/TypePrinter.h @@ -2360,14 +2360,6 @@ inline std::string to_string(CLGEMMKernelType val) { switch(val) { - case CLGEMMKernelType::NATIVE_V1: - { - return "Native_V1"; - } - case CLGEMMKernelType::RESHAPED_V1: - { - return "Reshaped_V1"; - } case CLGEMMKernelType::NATIVE: { return "Native"; -- cgit v1.2.1
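[Editor's note: with NATIVE_V1 and RESHAPED_V1 removed, the GEMM dispatch surface reduces to three variants. The summary below is a sketch derived from the ClGemm documentation updated in this patch; it is illustrative code, not code added by the commit.]

// Which kernels ClGemm now runs for each surviving CLGEMMKernelType.
#include "arm_compute/runtime/CL/CLTypes.h"

using namespace arm_compute;

const char *gemm_path(CLGEMMKernelType type)
{
    switch(type)
    {
        case CLGEMMKernelType::NATIVE:
            return "ClGemmMatrixMultiplyNativeKernel";
        case CLGEMMKernelType::RESHAPED:
            return "ClGemmReshapeLhsMatrixKernel + ClGemmReshapeRhsMatrixKernel + ClGemmMatrixMultiplyReshapedKernel";
        case CLGEMMKernelType::RESHAPED_ONLY_RHS:
            return "ClGemmReshapeRhsMatrixKernel + ClGemmMatrixMultiplyReshapedOnlyRhsKernel";
        default:
            return "unknown";
    }
}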