From 3b0a2654034714c16f5930d2b24936d8be7b18a6 Mon Sep 17 00:00:00 2001 From: Gian Marco Iodice Date: Fri, 7 Dec 2018 11:18:09 +0000 Subject: COMPMID-1775: Implement CLGEMMReshapeRHSMatrixKernel to reshape the RHS matrix of GEMM/GEMMLowp Change-Id: I77f2bfcc5d170bcc2428a2f27104942c1ec877d7 Reviewed-on: https://review.mlplatform.org/375 Reviewed-by: Michele Di Giorgio Tested-by: Arm Jenkins --- arm_compute/core/CL/CLKernels.h | 1 + .../core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h | 86 +++ arm_compute/core/Types.h | 10 + arm_compute/core/utils/misc/ShapeCalculator.h | 28 + src/core/CL/CLKernelLibrary.cpp | 2 + src/core/CL/cl_kernels/gemm.cl | 580 +++++++++++++++++++++ .../CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp | 172 ++++++ tests/validation/CL/GEMMReshapeRHSMatrix.cpp | 330 ++++++++++++ .../fixtures/GEMMReshapeRHSMatrixFixture.h | 128 +++++ .../validation/reference/GEMMReshapeRHSMatrix.cpp | 111 ++++ tests/validation/reference/GEMMReshapeRHSMatrix.h | 44 ++ 11 files changed, 1492 insertions(+) create mode 100644 arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h create mode 100644 src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp create mode 100644 tests/validation/CL/GEMMReshapeRHSMatrix.cpp create mode 100644 tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h create mode 100644 tests/validation/reference/GEMMReshapeRHSMatrix.cpp create mode 100644 tests/validation/reference/GEMMReshapeRHSMatrix.h diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h index 7bfd44721f..37b92f2d6c 100644 --- a/arm_compute/core/CL/CLKernels.h +++ b/arm_compute/core/CL/CLKernels.h @@ -80,6 +80,7 @@ #include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h" #include "arm_compute/core/CL/kernels/CLGEMMMatrixVectorMultiplyKernel.h" #include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h" +#include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h" #include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h" #include "arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h" #include "arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h" diff --git a/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h b/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h new file mode 100644 index 0000000000..611549a1cb --- /dev/null +++ b/arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLGEMMRESHAPERHSMATRIXKERNEL_H__
+#define __ARM_COMPUTE_CLGEMMRESHAPERHSMATRIXKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** OpenCL kernel to reshape the RHS matrix when performing the matrix multiplication.
+ * In particular, this kernel splits the input matrix into blocks of size K0xN0 and stores each one
+ * unrolled in the output matrix */
+class CLGEMMReshapeRHSMatrixKernel : public ICLKernel
+{
+public:
+    /** Default constructor */
+    CLGEMMReshapeRHSMatrixKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLGEMMReshapeRHSMatrixKernel(const CLGEMMReshapeRHSMatrixKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLGEMMReshapeRHSMatrixKernel &operator=(const CLGEMMReshapeRHSMatrixKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLGEMMReshapeRHSMatrixKernel(CLGEMMReshapeRHSMatrixKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLGEMMReshapeRHSMatrixKernel &operator=(CLGEMMReshapeRHSMatrixKernel &&) = default;
+    /** Initialise the kernel's input and output.
+     *
+     * @param[in]  input    Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[out] output   Output tensor. Data type supported: same as @p input
+     * @param[in]  rhs_info RHS matrix information to be used for reshaping. This object contains all the necessary
+     *                      information to reshape the input tensor. Only the following values are supported:
+     *                      rhs_info.n0: 2,4,8,16
+     *                      rhs_info.k0: 1,2,4,8,16 (k0 = 1 and k0 = 2 only if rhs_info.transpose = false)
+     *                      rhs_info.h0: greater than 0
+     *                      rhs_info.transpose: true, false
+     *                      rhs_info.interleave: true, false
+     */
+    void configure(const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMReshapeRHSMatrixKernel
+     *
+     * @param[in] input    Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in] output   Output tensor info which stores the reshaped matrix. Data type supported: same as @p input.
+     * @param[in] rhs_info RHS matrix information to be used for reshaping. This object contains all the necessary
+     *                     information to reshape the input tensor.
Only the following values are supported:
+     *                     rhs_info.n0: 2,4,8,16
+     *                     rhs_info.k0: 1,2,4,8,16 (k0 = 1 and k0 = 2 only if rhs_info.transpose = false)
+     *                     rhs_info.h0: greater than 0
+     *                     rhs_info.transpose: true, false
+     *                     rhs_info.interleave: true, false
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info);
+
+    // Inherited methods overridden
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    const ICLTensor *_input;
+    ICLTensor       *_output;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLGEMMRESHAPERHSMATRIXKERNEL_H__ */
\ No newline at end of file
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 55b0ccb30d..6ef9878a95 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1781,6 +1781,16 @@ struct GEMMLHSMatrixInfo
     bool         interleave{ true }; /**< True if the v0 (m0xk0) blocks have to be interleaved in the output row */
 };
 
+/** GEMM RHS (Right Hand Side) matrix information */
+struct GEMMRHSMatrixInfo
+{
+    unsigned int n0{ 1 };            /**< Number of columns processed by the matrix multiplication */
+    unsigned int k0{ 1 };            /**< Number of partial accumulations performed by the matrix multiplication */
+    unsigned int h0{ 1 };            /**< Number of horizontal blocks of size (k0xn0) stored on the same output row */
+    bool         transpose{ true };  /**< True if the (k0xn0) block has to be transposed before being stored */
+    bool         interleave{ true }; /**< True if the h0 (k0xn0) blocks have to be interleaved in the output row */
+};
+
 /** GEMM information class. This class stores the necessary information to compute GEMM functions
  *
  * This object also contains the information about how matrix A and matrix B have been reshaped
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 88ce8d9e7b..33893ad877 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -138,6 +138,34 @@ inline TensorShape compute_lhs_reshaped_shape(const ITensorInfo &a, const GEMMLH
     return lhs_shape;
 }
 
+inline TensorShape compute_rhs_reshaped_shape(const ITensorInfo &a, const GEMMRHSMatrixInfo &rhs_info)
+{
+    ARM_COMPUTE_ERROR_ON(rhs_info.n0 == 0);
+    ARM_COMPUTE_ERROR_ON(rhs_info.k0 == 0);
+    ARM_COMPUTE_ERROR_ON(rhs_info.h0 == 0);
+
+    // Input width/height
+    const unsigned int input_width  = a.dimension(0);
+    const unsigned int input_height = a.dimension(1);
+
+    // Number of horizontal/vertical blocks in the input tensor
+    const unsigned int num_horiz_blocks = std::ceil(input_width / static_cast<float>(rhs_info.n0));
+    const unsigned int num_vert_blocks  = std::ceil(input_height / static_cast<float>(rhs_info.k0));
+
+    // Block size
+    const unsigned int block_size = rhs_info.n0 * rhs_info.k0;
+
+    // Output width/height
+    const unsigned int output_width  = block_size * num_vert_blocks * rhs_info.h0;
+    const unsigned int output_height = std::ceil(num_horiz_blocks / static_cast<float>(rhs_info.h0));
+
+    TensorShape rhs_shape{ a.tensor_shape() };
+    rhs_shape.set(0, output_width);
+    rhs_shape.set(1, output_height);
+
+    return rhs_shape;
+}
+
 inline TensorShape compute_interleaved_shape(const ITensorInfo &a, int mult_interleave4x4_height = 1, bool reinterpret_input_as_3d = false)
 {
     // The interleaved output matrix will have the following shape: [ a_height * W, ceil(a_width / W) ] where W = 4 * mult_interleave4x4_height
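For clarity, a worked example of the shape computed above (editorial illustration; the values are hypothetical and not part of the patch): a 12x6 RHS matrix with n0 = 4, k0 = 2, h0 = 2 gives num_horiz_blocks = ceil(12 / 4) = 3, num_vert_blocks = ceil(6 / 2) = 3 and block_size = 4 * 2 = 8, hence an output shape of [ 8 * 3 * 2, ceil(3 / 2) ] = [ 48, 2 ]:

    // Hypothetical use of compute_rhs_reshaped_shape() (illustration only)
    TensorInfo        src_info(TensorShape(12U, 6U), 1, DataType::F32);
    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0 = 4;
    rhs_info.k0 = 2;
    rhs_info.h0 = 2;
    const TensorShape dst_shape = compute_rhs_reshaped_shape(src_info, rhs_info); // [ 48, 2 ]

Note that rounding num_horiz_blocks up to a multiple of h0 makes the output slightly larger than the input (96 elements for 72 input values), which is why the reference implementation below zero-initializes its output and the tests run the kernel on a zero-initialized output tensor.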
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 7b98e5ae80..54fc618bdf 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -283,6 +283,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "gemm_lc_vm_f32", "gemm.cl" },
     { "gemm_transpose1xW", "gemm.cl" },
     { "gemm_reshape_lhs_matrix_nt", "gemm.cl" },
+    { "gemm_reshape_rhs_matrix_nt", "gemm.cl" },
+    { "gemm_reshape_rhs_matrix_t", "gemm.cl" },
     { "gemmlowp_matrix_a_reduction", "gemmlowp.cl" },
     { "gemmlowp_matrix_a_reduction_dot8", "gemmlowp.cl" },
     { "gemmlowp_matrix_b_reduction", "gemmlowp.cl" },
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index cf1e021929..40ee1d45ad 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -252,6 +252,586 @@ __kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
 }
 #endif // defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE)
 
+#if defined(K0) && defined(N0) && defined(H0) && defined(DATA_TYPE) && defined(SRC_HEIGHT)
+/** This OpenCL kernel reshapes the RHS input matrix. The kernel splits the input matrix into blocks of size K0xN0 and stores each one (not transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=float)
+ * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (i.e. -DSRC_HEIGHT=16)
+ * @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (i.e. -DK0=2, -DN0=2).
+ * @note The number of K0xN0 horizontal blocks to store on the same output row must be passed at compile time using -DH0 (i.e. -DH0=2)
+ * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ * @note Only the following values for K0, N0 and H0 are supported:
+ *                                      N0: 2,4,8,16
+ *                                      K0: 1,2,4,8,16
+ *                                      H0: greater than 0
+ *
+ * @param[in]  src_ptr                           Pointer to the source RHS tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in]  src_stride_x                      Stride of the source RHS tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                      Stride of the source RHS tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_stride_z                      Stride of the source RHS tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source RHS tensor
+ * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
+ * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                        dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_reshape_rhs_matrix_nt(TENSOR3D_DECLARATION(src),
+                                         TENSOR3D_DECLARATION(dst))
+{
+    // Block size
+#define BLOCK_SIZE ((K0) * (N0))
+
+    // Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (N0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+    // Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (N0) * (H0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (N0)
+#endif // defined(INTERLEAVE)
+
+    // Compute source and destination addresses
+    uint x = get_global_id(0);
+    uint y = get_global_id(1);
+    uint z = get_global_id(2);
+
+    // ------------------ Compute input/output addresses ---------------------------
+
+    // Compute the input address
+    __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)N0 * sizeof(DATA_TYPE) + y * (uint)K0 * src_stride_y + z * (uint)src_stride_z;
+
+    // Compute the output address
+    __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y * (uint)BLOCK_SIZE * (uint)H0 * sizeof(DATA_TYPE)) + ((x % (uint)H0) * (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE)) + ((x / (uint)H0) * (uint)dst_stride_y) + z * (uint)dst_stride_z;
+
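+    // [Editorial note, not part of the original patch] Example of the mapping
+    // with H0=2 and BLOCK_SIZE=8 (K0=4, N0=2), without -DINTERLEAVE: the block
+    // at block-column x=3, block-row y=1 lands on output row x / H0 = 1, at an
+    // x offset of y * BLOCK_SIZE * H0 + (x % H0) * BLOCK_SIZE = 16 + 8 = 24
+    // elements. In other words, each output row stores H0 adjacent
+    // block-columns for every block-row y of the input.
+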
+    // ---------------------------Load input values --------------------------------
+
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a0 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a1 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a2 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a3 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a4 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a5 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a6 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a7 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a8 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a9 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aA = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aB = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aC = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aD = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aE = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aF = 0;
+
+    // Load values from the RHS matrix
+    a0 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
+#if K0 > 1
+    if(y * (uint)K0 + 1 < SRC_HEIGHT)
+    {
+        a1 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
+    }
+#endif // K0 > 1
+#if K0 > 2
+    if(y * (uint)K0 + 2 < SRC_HEIGHT)
+    {
+        a2 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y));
+    }
+    if(y * (uint)K0 + 3 < SRC_HEIGHT)
+    {
+        a3 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y));
+    }
+#endif // K0 > 2
+#if K0 > 4
+    if(y * (uint)K0 + 4 < SRC_HEIGHT)
+    {
+        a4 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y));
+    }
+    if(y * (uint)K0 + 5 < SRC_HEIGHT)
+    {
+        a5 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y));
+    }
+    if(y * (uint)K0 + 6 < SRC_HEIGHT)
+    {
+        a6 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y));
+    }
+    if(y * (uint)K0 + 7 < SRC_HEIGHT)
+    {
+        a7 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y));
+    }
+#endif // K0 > 4
+#if K0 > 8
+    if(y * (uint)K0 + 8 < SRC_HEIGHT)
+    {
+        a8 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 8 * src_stride_y));
+    }
+    if(y * (uint)K0 + 9 < SRC_HEIGHT)
+    {
+        a9 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 9 * src_stride_y));
+    }
+    if(y * (uint)K0 + 10 < SRC_HEIGHT)
+    {
+        aA = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 10 * src_stride_y));
+    }
+    if(y * (uint)K0 + 11 < SRC_HEIGHT)
+    {
+        aB = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 11 * src_stride_y));
+    }
+    if(y * (uint)K0 + 12 < SRC_HEIGHT)
+    {
+        aC = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 12 * src_stride_y));
+    }
+    if(y * (uint)K0 + 13 < SRC_HEIGHT)
+    {
+        aD = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 13 * src_stride_y));
+    }
+    if(y * (uint)K0 + 14 < SRC_HEIGHT)
+    {
+        aE = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 14 * src_stride_y));
+    }
+    if(y * (uint)K0 + 15 < SRC_HEIGHT)
+    {
+        aF = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 15 * src_stride_y));
+    }
+#endif // K0 > 8
+
+    // ---------------------------Store output values ------------------------------
+
+    VSTORE(N0)
+    (a0, 0, (__global DATA_TYPE *)(output_ptr + 0 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#if K0 > 1
+    VSTORE(N0)
+    (a1, 0, (__global DATA_TYPE *)(output_ptr + 1 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // K0 > 1
+#if K0 > 2
+    VSTORE(N0)
+    (a2, 0, (__global DATA_TYPE *)(output_ptr + 2 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (a3, 0, (__global DATA_TYPE *)(output_ptr + 3 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // K0 > 2
+#if K0 > 4
+    VSTORE(N0)
+    (a4, 0, (__global DATA_TYPE *)(output_ptr + 4 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (a5, 0, (__global DATA_TYPE *)(output_ptr + 5 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (a6, 0, (__global DATA_TYPE *)(output_ptr + 6 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (a7, 0, (__global DATA_TYPE *)(output_ptr + 7 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // K0 > 4
+#if K0 > 8
+    VSTORE(N0)
+    (a8, 0, (__global DATA_TYPE *)(output_ptr + 8 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (a9, 0, (__global DATA_TYPE *)(output_ptr + 9 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (aA, 0, (__global DATA_TYPE *)(output_ptr + 10 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (aB, 0, (__global DATA_TYPE *)(output_ptr + 11 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (aC, 0, (__global DATA_TYPE *)(output_ptr + 12 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (aD, 0, (__global DATA_TYPE *)(output_ptr + 13 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (aE, 0, (__global DATA_TYPE *)(output_ptr + 14 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(N0)
+    (aF, 0, (__global DATA_TYPE *)(output_ptr + 15 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // K0 > 8
+
+#undef BLOCK_SIZE
+#undef OUTPUT_OFFSET_X
+#undef OUTPUT_STEP_X
+}
+
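+// [Editorial sketch, not part of the original patch] For a 4x4 input with
+// K0=2, N0=2, H0=2 the kernel above produces one 16x1 output row. Writing the
+// block at block-column x and block-row y as B(y,x), and its two rows as
+// r0/r1, the output row reads:
+//
+//   without -DINTERLEAVE: B(0,0)r0 B(0,0)r1 B(0,1)r0 B(0,1)r1 B(1,0)r0 B(1,0)r1 B(1,1)r0 B(1,1)r1
+//   with -DINTERLEAVE:    B(0,0)r0 B(0,1)r0 B(0,0)r1 B(0,1)r1 B(1,0)r0 B(1,1)r0 B(1,0)r1 B(1,1)r1
+//
+// i.e. interleaving alternates the rows of the H0 blocks sharing an output row
+// instead of storing each block contiguously.
+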
+#if defined(TRANSPOSE)
+/** This OpenCL kernel reshapes the RHS input matrix. The kernel splits the input matrix into blocks of size K0xN0 and stores each one (transposed) in
+ * the output matrix unrolling the values.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE (i.e. -DDATA_TYPE=float)
+ * @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (i.e. -DSRC_HEIGHT=16)
+ * @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (i.e. -DK0=2, -DN0=2).
+ * @note The number of K0xN0 horizontal blocks to store on the same output row must be passed at compile time using -DH0 (i.e. -DH0=2)
+ * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
+ * @note The option -DTRANSPOSE must be passed at compile time.
+ * @note Only the following values for K0, N0 and H0 are supported:
+ *                                      N0: 2,4,8,16
+ *                                      K0: 4,8,16
+ *                                      H0: greater than 0
+ *
+ * @param[in]  src_ptr                           Pointer to the source RHS tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in]  src_stride_x                      Stride of the source RHS tensor in X dimension (in bytes)
+ * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  src_stride_y                      Stride of the source RHS tensor in Y dimension (in bytes)
+ * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  src_stride_z                      Stride of the source RHS tensor in Z dimension (in bytes)
+ * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source RHS tensor
+ * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data types: same as @p src_ptr
+ * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
+ * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
+ * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  dst_stride_z                      Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  dst_step_z                        dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
+ */
+__kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src),
+                                        TENSOR3D_DECLARATION(dst))
+{
+    // Block size
+#define BLOCK_SIZE ((K0) * (N0))
+
+    // Output offset X
+#if defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (K0)
+#else // defined(INTERLEAVE)
+#define OUTPUT_OFFSET_X (BLOCK_SIZE)
+#endif // defined(INTERLEAVE)
+
+    // Output step X
+#if defined(INTERLEAVE)
+#define OUTPUT_STEP_X (K0) * (H0)
+#else // Do not interleave
+#define OUTPUT_STEP_X (K0)
+#endif // defined(INTERLEAVE)
+
+    // Compute source and destination addresses
+    uint x = get_global_id(0);
+    uint y = get_global_id(1);
+    uint z = get_global_id(2);
+
+    // ------------------ Compute input/output addresses ---------------------------
+
+    // Compute the input address
+    __global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)N0 * sizeof(DATA_TYPE) + y * (uint)K0 * src_stride_y + z * (uint)src_stride_z;
+
+    // Compute the output address
+    __global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y * (uint)BLOCK_SIZE * (uint)H0 * sizeof(DATA_TYPE)) + ((x % (uint)H0) * (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE)) + ((x / (uint)H0) * (uint)dst_stride_y) + z * (uint)dst_stride_z;
+
+    // ---------------------------Load input values --------------------------------
+
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a0 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a1 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a2 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a3 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a4 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a5 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a6 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a7 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a8 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    a9 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aA = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aB = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aC = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aD = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aE = 0;
+    VEC_DATA_TYPE(DATA_TYPE, N0)
+    aF = 0;
+
+    // Load values from the RHS matrix
+    a0 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
+    if(y * (uint)K0 + 1 < SRC_HEIGHT)
+    {
+        a1 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
+    }
+    if(y * (uint)K0 + 2 < SRC_HEIGHT)
+    {
+        a2 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y));
+    }
+    if(y * (uint)K0 + 3 < SRC_HEIGHT)
+    {
+        a3 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y));
+    }
+#if K0 > 4
+    if(y * (uint)K0 + 4 < SRC_HEIGHT)
+    {
+        a4 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y));
+    }
+    if(y * (uint)K0 + 5 < SRC_HEIGHT)
+    {
+        a5 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y));
+    }
+    if(y * (uint)K0 + 6 < SRC_HEIGHT)
+    {
+        a6 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y));
+    }
+    if(y * (uint)K0 + 7 < SRC_HEIGHT)
+    {
+        a7 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y));
+    }
+#endif // K0 > 4
+#if K0 > 8
+    if(y * (uint)K0 + 8 < SRC_HEIGHT)
+    {
+        a8 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 8 * src_stride_y));
+    }
+    if(y * (uint)K0 + 9 < SRC_HEIGHT)
+    {
+        a9 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 9 * src_stride_y));
+    }
+    if(y * (uint)K0 + 10 < SRC_HEIGHT)
+    {
+        aA = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 10 * src_stride_y));
+    }
+    if(y * (uint)K0 + 11 < SRC_HEIGHT)
+    {
+        aB = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 11 * src_stride_y));
+    }
+    if(y * (uint)K0 + 12 < SRC_HEIGHT)
+    {
+        aC = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 12 * src_stride_y));
+    }
+    if(y * (uint)K0 + 13 < SRC_HEIGHT)
+    {
+        aD = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 13 * src_stride_y));
+    }
+    if(y * (uint)K0 + 14 < SRC_HEIGHT)
+    {
+        aE = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 14 * src_stride_y));
+    }
+    if(y * (uint)K0 + 15 < SRC_HEIGHT)
+    {
+        aF = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 15 * src_stride_y));
+    }
+#endif // K0 > 8
+
+    // ---------------------------Transpose the block ------------------------------
+
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res0 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res1 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res2 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res3 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res4 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res5 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res6 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res7 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res8 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    res9 = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    resA = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    resB = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    resC = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    resD = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    resE = 0;
+    VEC_DATA_TYPE(DATA_TYPE, K0)
+    resF = 0;
+
+#if K0 == 4
+    // This part computes the following transpositions:
+    // 4x2 -> 2x4
+    // 4x4 -> 4x4
+    // 4x8 -> 8x4
+    // 4x16 -> 16x4
+    res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0);
+    res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1);
+#if N0 > 2
+    res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2);
+    res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3);
+#endif // N0 > 2
+#if N0 > 4
+    res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4);
+    res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5);
+    res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6);
+    res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7);
+#endif // N0 > 4
+#if N0 > 8
+    res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8);
+    res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9);
+    resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA);
+    resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB);
+    resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC);
+    resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD);
+    resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE);
+    resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF);
+#endif // N0 > 8
+
+#elif K0 == 8
+    // This part computes the following transpositions:
+    // 8x2 -> 2x8
+    // 8x4 -> 4x8
+    // 8x8 -> 8x8
+    // 8x16 -> 16x8
+    res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0, a4.s0, a5.s0, a6.s0, a7.s0);
+    res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1, a4.s1, a5.s1, a6.s1, a7.s1);
+#if N0 > 2
+    res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2, a4.s2, a5.s2, a6.s2, a7.s2);
+    res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3, a4.s3, a5.s3, a6.s3, a7.s3);
+#endif // N0 > 2
+#if N0 > 4
+    res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4, a4.s4, a5.s4, a6.s4, a7.s4);
+    res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5, a4.s5, a5.s5, a6.s5, a7.s5);
+    res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6, a4.s6, a5.s6, a6.s6, a7.s6);
+    res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7, a4.s7, a5.s7, a6.s7, a7.s7);
+#endif // N0 > 4
+#if N0 > 8
+    res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8, a4.s8, a5.s8, a6.s8, a7.s8);
+    res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9, a4.s9, a5.s9, a6.s9, a7.s9);
+    resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA, a4.sA, a5.sA, a6.sA, a7.sA);
+    resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB, a4.sB, a5.sB, a6.sB, a7.sB);
+    resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC, a4.sC, a5.sC, a6.sC, a7.sC);
+    resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD, a4.sD, a5.sD, a6.sD, a7.sD);
+    resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE, a4.sE, a5.sE, a6.sE, a7.sE);
+    resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF, a4.sF, a5.sF, a6.sF, a7.sF);
+#endif // N0 > 8
+
+#elif K0 == 16
+
+    // This part computes the following transpositions:
+    // 16x2 -> 2x16
+    // 16x4 -> 4x16
+    // 16x8 -> 8x16
+    // 16x16 -> 16x16
+    res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0, a4.s0, a5.s0, a6.s0, a7.s0,
+                                          a8.s0, a9.s0, aA.s0, aB.s0, aC.s0, aD.s0, aE.s0, aF.s0);
+    res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1, a4.s1, a5.s1, a6.s1, a7.s1,
+                                          a8.s1, a9.s1, aA.s1, aB.s1, aC.s1, aD.s1, aE.s1, aF.s1);
+#if N0 > 2
+    res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2, a4.s2, a5.s2, a6.s2, a7.s2,
+                                          a8.s2, a9.s2, aA.s2, aB.s2, aC.s2, aD.s2, aE.s2, aF.s2);
+    res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3, a4.s3, a5.s3, a6.s3, a7.s3,
+                                          a8.s3, a9.s3, aA.s3, aB.s3, aC.s3, aD.s3, aE.s3, aF.s3);
+#endif // N0 > 2
+#if N0 > 4
+    res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4, a4.s4, a5.s4, a6.s4, a7.s4,
+                                          a8.s4, a9.s4, aA.s4, aB.s4, aC.s4, aD.s4, aE.s4, aF.s4);
+    res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5, a4.s5, a5.s5, a6.s5, a7.s5,
+                                          a8.s5, a9.s5, aA.s5, aB.s5, aC.s5, aD.s5, aE.s5, aF.s5);
+    res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6, a4.s6, a5.s6, a6.s6, a7.s6,
+                                          a8.s6, a9.s6, aA.s6, aB.s6, aC.s6, aD.s6, aE.s6, aF.s6);
+    res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7, a4.s7, a5.s7, a6.s7, a7.s7,
+                                          a8.s7, a9.s7, aA.s7, aB.s7, aC.s7, aD.s7, aE.s7, aF.s7);
+#endif // N0 > 4
+#if N0 > 8
+    res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8, a4.s8, a5.s8, a6.s8, a7.s8,
+                                          a8.s8, a9.s8, aA.s8, aB.s8, aC.s8, aD.s8, aE.s8, aF.s8);
+    res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9, a4.s9, a5.s9, a6.s9, a7.s9,
+                                          a8.s9, a9.s9, aA.s9, aB.s9, aC.s9, aD.s9, aE.s9, aF.s9);
+    resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA, a4.sA, a5.sA, a6.sA, a7.sA,
+                                          a8.sA, a9.sA, aA.sA, aB.sA, aC.sA, aD.sA, aE.sA, aF.sA);
+    resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB, a4.sB, a5.sB, a6.sB, a7.sB,
+                                          a8.sB, a9.sB, aA.sB, aB.sB, aC.sB, aD.sB, aE.sB, aF.sB);
+    resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC, a4.sC, a5.sC, a6.sC, a7.sC,
+                                          a8.sC, a9.sC, aA.sC, aB.sC, aC.sC, aD.sC, aE.sC, aF.sC);
+    resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD, a4.sD, a5.sD, a6.sD, a7.sD,
+                                          a8.sD, a9.sD, aA.sD, aB.sD, aC.sD, aD.sD, aE.sD, aF.sD);
+    resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE, a4.sE, a5.sE, a6.sE, a7.sE,
+                                          a8.sE, a9.sE, aA.sE, aB.sE, aC.sE, aD.sE, aE.sE, aF.sE);
+    resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF, a4.sF, a5.sF, a6.sF, a7.sF,
+                                          a8.sF, a9.sF, aA.sF, aB.sF, aC.sF, aD.sF, aE.sF, aF.sF);
+#endif // N0 > 8
+
+#else // K0 == 16
+#error "Not supported K0 value"
+#endif // K0 == 16
+
+    // ---------------------------Store the output values ------------------------------
+
+    VSTORE(K0)
+    (res0, 0, (__global DATA_TYPE *)(output_ptr + 0 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(K0)
+    (res1, 0, (__global DATA_TYPE *)(output_ptr + 1 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#if N0 > 2
+    VSTORE(K0)
+    (res2, 0, (__global DATA_TYPE *)(output_ptr + 2 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(K0)
+    (res3, 0, (__global DATA_TYPE *)(output_ptr + 3 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+#endif // N0 > 2
+#if N0 > 4
+    VSTORE(K0)
+    (res4, 0, (__global DATA_TYPE *)(output_ptr + 4 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(K0)
+    (res5, 0, (__global DATA_TYPE *)(output_ptr + 5 * OUTPUT_STEP_X * sizeof(DATA_TYPE)));
+    VSTORE(K0)
+    (res6, 0, (__global DATA_TYPE 
*)(output_ptr + 6 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (res7, 0, (__global DATA_TYPE *)(output_ptr + 7 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); +#endif // N0 > 4 +#if N0 > 8 + VSTORE(K0) + (res8, 0, (__global DATA_TYPE *)(output_ptr + 8 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (res9, 0, (__global DATA_TYPE *)(output_ptr + 9 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (resA, 0, (__global DATA_TYPE *)(output_ptr + 10 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (resB, 0, (__global DATA_TYPE *)(output_ptr + 11 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (resC, 0, (__global DATA_TYPE *)(output_ptr + 12 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (resD, 0, (__global DATA_TYPE *)(output_ptr + 13 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (resE, 0, (__global DATA_TYPE *)(output_ptr + 14 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); + VSTORE(K0) + (resF, 0, (__global DATA_TYPE *)(output_ptr + 15 * OUTPUT_STEP_X * sizeof(DATA_TYPE))); +#endif // N0 > 8 + +#undef BLOCK_SIZE +#undef OUTPUT_OFFSET_X +#undef OUTPUT_STEP_X +} +#endif // defined(TRANSPOSE) +#endif // defined(K0) && defined(N0) && defined(H0) && defined(DATA_TYPE) && defined(SRC_HEIGHT) + #if defined(TRANSPOSE_W) && defined(MULT_TRANSPOSE1XW_WIDTH) #if ELEMENT_SIZE == 1 diff --git a/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp new file mode 100644 index 0000000000..7cfa234b2e --- /dev/null +++ b/src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.cpp @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2018 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 == 0);
+    ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 == 0);
+    ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.h0 == 0);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.n0 & (rhs_info.n0 - 1)), "Only power of two values are allowed for n0");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG((rhs_info.k0 & (rhs_info.k0 - 1)), "Only power of two values are allowed for k0");
+    ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.n0 > 16);
+    ARM_COMPUTE_RETURN_ERROR_ON(rhs_info.k0 > 16);
+    ARM_COMPUTE_RETURN_ERROR_ON((rhs_info.k0 == 1) && (rhs_info.transpose));
+    ARM_COMPUTE_RETURN_ERROR_ON((rhs_info.k0 == 2) && (rhs_info.transpose));
+
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::U8, DataType::S8,
+                                                         DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+                                                         DataType::F16, DataType::F32);
+
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_rhs_reshaped_shape(*input, rhs_info));
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    }
+
+    return Status{};
+}
+
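+// [Editorial examples, not part of the original patch] A few combinations as
+// accepted or rejected by the checks above:
+//   n0=4,  k0=2,  h0=2, transpose=false -> OK
+//   n0=4,  k0=2,  h0=2, transpose=true  -> error (k0 must be at least 4 when transposing)
+//   n0=3,  k0=4,  h0=1                  -> error (n0 must be a power of two)
+//   n0=16, k0=16, h0=0                  -> error (h0 must be greater than 0)
+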
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info)
+{
+    const unsigned int num_elems_processed_per_iteration_x = rhs_info.n0;
+    const unsigned int num_elems_processed_per_iteration_y = rhs_info.k0;
+    bool               window_changed                      = false;
+
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_rhs_reshaped_shape(*input, rhs_info)));
+
+    // Configure window
+    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));
+
+    AccessWindowRectangle input_access(input, 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
+    AccessWindowStatic    output_access(output, 0, 0, output->dimension(0), output->dimension(1));
+
+    window_changed = update_window_and_padding(win, input_access);
+    output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+
+    // Collapse along the Z direction
+    // This collapse needs to be here in order to tune the Z dimension of LWS
+    Window collapsed = win.collapse(win, Window::DimZ);
+
+    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+    return std::make_pair(err, collapsed);
+}
+} // namespace
+
+CLGEMMReshapeRHSMatrixKernel::CLGEMMReshapeRHSMatrixKernel()
+    : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLGEMMReshapeRHSMatrixKernel::configure(const ICLTensor *input, ICLTensor *output, const GEMMRHSMatrixInfo &rhs_info)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    // Perform validate step
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), rhs_info));
+
+    _input  = input;
+    _output = output;
+
+    // Create build options
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DN0=" + support::cpp11::to_string(rhs_info.n0));
+    build_opts.add_option("-DK0=" + support::cpp11::to_string(rhs_info.k0));
+    build_opts.add_option("-DH0=" + support::cpp11::to_string(rhs_info.h0));
+    build_opts.add_option_if(rhs_info.transpose, "-DTRANSPOSE");
+    build_opts.add_option_if(rhs_info.interleave, "-DINTERLEAVE");
+    build_opts.add_option("-DSRC_HEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
+
+    switch(input->info()->element_size())
+    {
+        case 1:
+            build_opts.add_option("-DDATA_TYPE=uchar");
+            break;
+        case 2:
+            build_opts.add_option("-DDATA_TYPE=ushort");
+            break;
+        case 4:
+            build_opts.add_option("-DDATA_TYPE=uint");
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Data type not supported");
+    }
+
+    std::string kernel_name("gemm_reshape_rhs_matrix_");
+    kernel_name += rhs_info.transpose ? "t" : "nt";
+
+    // Create kernel
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
+
+    // Configure kernel window
+    auto win_config = validate_and_configure_window(input->info(), output->info(), rhs_info);
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+    ICLKernel::configure_internal(win_config.second);
+}
+
+Status CLGEMMReshapeRHSMatrixKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const GEMMRHSMatrixInfo &rhs_info)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, rhs_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(), rhs_info).first);
+
+    return Status{};
+}
+
+void CLGEMMReshapeRHSMatrixKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    Window slice = window.first_slice_window_3D();
+
+    do
+    {
+        unsigned int idx = 0;
+        add_3D_tensor_argument(idx, _input, slice);
+        add_3D_tensor_argument(idx, _output, slice);
+        enqueue(queue, *this, slice);
+    }
+    while(window.slide_window_slice_3D(slice));
+}
\ No newline at end of file
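A minimal usage sketch of the new kernel (editorial illustration only; it assumes the usual CL runtime initialization and mirrors what the validation fixture further below does):

    #include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    CLScheduler::get().default_init();

    // 64 columns (N) x 32 rows (K) RHS matrix
    CLTensor src;
    src.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));

    GEMMRHSMatrixInfo rhs_info;
    rhs_info.n0         = 4;
    rhs_info.k0         = 4;
    rhs_info.h0         = 2;
    rhs_info.transpose  = true;
    rhs_info.interleave = true;

    // The output tensor info is auto-initialized by configure()
    CLTensor dst;

    CLGEMMReshapeRHSMatrixKernel reshape_rhs;
    reshape_rhs.configure(&src, &dst, rhs_info);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... map src and fill it with the RHS values ...
    CLScheduler::get().enqueue(reshape_rhs);
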
diff --git a/tests/validation/CL/GEMMReshapeRHSMatrix.cpp b/tests/validation/CL/GEMMReshapeRHSMatrix.cpp
new file mode 100644
index 0000000000..e886b7bd43
--- /dev/null
+++ b/tests/validation/CL/GEMMReshapeRHSMatrix.cpp
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+/** Data types */
+const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 });
+
+/** Batch size values to test */
+const auto b_values = framework::dataset::make("batchsize", 1, 3);
+
+/** N0 values to test */
+const auto n0_values = framework::dataset::make("N0", { 2, 4, 8, 16 });
+
+/** H0 values to test */
+const auto h0_values = framework::dataset::make("H0", 1, 4);
+
+/** Interleave values to test */
+const auto i_values = framework::dataset::make("interleave", { true, false });
+
+} // namespace
+
+using namespace arm_compute::misc::shape_calculator;
+
+// Initialize the output tensor with zero and fill the border with zero
+using CLGEMMReshapeRHSMatrix = CLSynthetizeFunctionInitOutputWithZeroAndWithZeroConstantBorder<CLGEMMReshapeRHSMatrixKernel, 16>;
+
+template <typename T>
+using CLGEMMReshapeRHSMatrixFixture = GEMMReshapeRHSMatrixValidationFixture<CLTensor, CLAccessor, CLGEMMReshapeRHSMatrix, T>;
+
+TEST_SUITE(CL)
+TEST_SUITE(GEMMReshapeRHSMatrix)
+
+// This configuration tests only transpose = true
+DATA_TEST_CASE(Configuration0, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                                                            b_values),
+                                                                                                    data_types),
+                                                                                            n0_values),
+                                                                                    framework::dataset::make("K0", { 4, 8, 16 })),
+                                                                            h0_values),
+                                                                    i_values),
+               shape_in, b_value, data_type, n0_value, k0_value, h0_value, i_value)
+{
+    GEMMRHSMatrixInfo rhs_info;
+    rhs_info.n0         = n0_value;
+    rhs_info.k0         = k0_value;
+    rhs_info.h0         = h0_value;
+    rhs_info.interleave = i_value;
+    rhs_info.transpose  = true;
+
+    const TensorShape shape_src(shape_in[0], shape_in[1], b_value);
+    const TensorShape shape_dst = compute_rhs_reshaped_shape(TensorInfo(shape_src, 1, data_type), rhs_info);
+
+    // Create tensors
+    CLTensor src = create_tensor<CLTensor>(shape_src, data_type);
+    CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLGEMMReshapeRHSMatrixKernel reshape_rhs;
+    reshape_rhs.configure(&src, &dst, rhs_info);
+}
+
+// This configuration tests only transpose = false
+DATA_TEST_CASE(Configuration1, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                                                            b_values),
+                                                                                                    data_types),
+                                                                                            n0_values),
+                                                                                    framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                                                            h0_values),
+                                                                    i_values),
+               shape_in, b_value, data_type, n0_value, k0_value, h0_value, i_value)
+{
+    GEMMRHSMatrixInfo rhs_info;
+    rhs_info.n0         = n0_value;
+    rhs_info.k0         = k0_value;
+    rhs_info.h0         = h0_value;
+    rhs_info.interleave = i_value;
+    rhs_info.transpose  = false;
+
+    const TensorShape shape_src(shape_in[0], shape_in[1], b_value);
+    const TensorShape shape_dst = compute_rhs_reshaped_shape(TensorInfo(shape_src, 1, data_type), rhs_info);
+
+    // Create tensors
+    CLTensor src = create_tensor<CLTensor>(shape_src, data_type);
+    CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLGEMMReshapeRHSMatrixKernel reshape_rhs;
+    reshape_rhs.configure(&src, &dst, rhs_info);
+}
+
+TEST_SUITE(S32)
+// RunSmall tests only for transpose = false
+FIXTURE_DATA_TEST_CASE(RunSmall0, CLGEMMReshapeRHSMatrixFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S32)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", false)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunSmall tests only for transpose = true
+FIXTURE_DATA_TEST_CASE(RunSmall1, CLGEMMReshapeRHSMatrixFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S32)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", true)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunLarge tests only for transpose = false
+FIXTURE_DATA_TEST_CASE(RunLarge0, CLGEMMReshapeRHSMatrixFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S32)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", false)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunLarge tests only for transpose = true
+FIXTURE_DATA_TEST_CASE(RunLarge1, CLGEMMReshapeRHSMatrixFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S32)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", true)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+// RunSmall tests only for transpose = false
+FIXTURE_DATA_TEST_CASE(RunSmall0, CLGEMMReshapeRHSMatrixFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S16)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", false)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunSmall tests only for transpose = true
+FIXTURE_DATA_TEST_CASE(RunSmall1, CLGEMMReshapeRHSMatrixFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S16)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", true)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunLarge tests only for transpose = false
+FIXTURE_DATA_TEST_CASE(RunLarge0, CLGEMMReshapeRHSMatrixFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S16)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", false)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunLarge tests only for transpose = true
+FIXTURE_DATA_TEST_CASE(RunLarge1, CLGEMMReshapeRHSMatrixFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S16)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", true)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+// RunSmall tests only for transpose = false
+FIXTURE_DATA_TEST_CASE(RunSmall0, CLGEMMReshapeRHSMatrixFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S8)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", false)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunSmall tests only for transpose = true
+FIXTURE_DATA_TEST_CASE(RunSmall1, CLGEMMReshapeRHSMatrixFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::SmallGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S8)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", true)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunLarge tests only for transpose = false
+FIXTURE_DATA_TEST_CASE(RunLarge0, CLGEMMReshapeRHSMatrixFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S8)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 1, 2, 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", false)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+// RunLarge tests only for transpose = true
+FIXTURE_DATA_TEST_CASE(RunLarge1, CLGEMMReshapeRHSMatrixFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(combine(combine(combine(combine(datasets::LargeGEMMReshape2DShapes(),
+                                                                               b_values),
+                                                                       framework::dataset::make("DataType", DataType::S8)),
+                                                               n0_values),
+                                                       framework::dataset::make("K0", { 4, 8, 16 })),
+                                               h0_values),
+                                       i_values),
+                               framework::dataset::make("transpose", true)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // GEMMReshapeRHSMatrix
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
new file mode 100644
index 0000000000..e03c4f39b8
--- /dev/null
+++ b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMRESHAPERHSMATRIX_FIXTURE
+#define ARM_COMPUTE_TEST_GEMMRESHAPERHSMATRIX_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/GEMMReshapeRHSMatrix.h"
+#include "tests/validation/reference/Utils.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class GEMMReshapeRHSMatrixValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_in, unsigned int batch_size, DataType data_type, unsigned int n0, unsigned int k0, unsigned int h0, bool interleave, bool transpose)
+    {
+        GEMMRHSMatrixInfo rhs_info;
+        rhs_info.n0         = n0;
+        rhs_info.k0         = k0;
+        rhs_info.h0         = h0;
+        rhs_info.interleave = interleave;
+        rhs_info.transpose  = transpose;
+
+        // Set the tensor shape
+        const TensorShape shape_src(shape_in[0],
+                                    shape_in[1],
+                                    batch_size);
+
+        _target    = compute_target(shape_src, data_type, rhs_info);
+        _reference = compute_reference(shape_src, data_type, rhs_info);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor)
+    {
+        library->fill_tensor_uniform(tensor, 0);
+    }
+
+    TensorType compute_target(TensorShape input_shape, DataType data_type, const GEMMRHSMatrixInfo &rhs_info)
+    {
+        // Create tensors
+        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
+        TensorType dst;
+
+        // The output tensor will be auto-initialized within the function
+
+        // Create and configure function
+        FunctionType gemm_rhs_reshape;
+        gemm_rhs_reshape.configure(&src, &dst, rhs_info);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src));
+
+        // Compute GEMM RHS matrix reshape function
+        gemm_rhs_reshape.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, DataType data_type, const GEMMRHSMatrixInfo &rhs_info)
+    {
+        // Create reference
+        SimpleTensor<T> src{ input_shape, data_type, 1 };
+
+        // Fill reference
+        fill(src);
+
+        TensorShape output_shape = compute_rhs_reshaped_shape(TensorInfo(input_shape, 1, data_type), rhs_info);
+
+        return reference::gemm_reshape_rhs_matrix(src, output_shape, rhs_info);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMRESHAPERHSMATRIX_FIXTURE */
\ No newline at end of file
diff --git a/tests/validation/reference/GEMMReshapeRHSMatrix.cpp b/tests/validation/reference/GEMMReshapeRHSMatrix.cpp
new file mode 100644
index 0000000000..0224c5c67c
--- /dev/null
+++ b/tests/validation/reference/GEMMReshapeRHSMatrix.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "GEMMReshapeRHSMatrix.h"
+
+#include "arm_compute/core/Types.h"
+
+#include "tests/validation/Helpers.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> gemm_reshape_rhs_matrix(const SimpleTensor<T> &in, const TensorShape &output_shape, const GEMMRHSMatrixInfo &rhs_info)
+{
+    ARM_COMPUTE_ERROR_ON(in.shape().num_dimensions() > 3);
+
+    SimpleTensor<T> out{ output_shape, in.data_type() };
+
+    // Initialize the output tensor with zero
+    std::memset(&out[0], 0, out.num_elements() * sizeof(T));
+
+    const unsigned int N = in.shape()[0];
+    const unsigned int K = in.shape()[1];
+    const unsigned int B = in.shape()[2];
+
+    const unsigned int num_tiles_x = std::ceil(N / static_cast<float>(rhs_info.n0));
+    const unsigned int num_tiles_y = std::ceil(K / static_cast<float>(rhs_info.k0));
+
+    const TensorShape tile_dims(rhs_info.n0, rhs_info.k0);
+    const TensorShape tile_dims_transposed(rhs_info.k0, rhs_info.n0);
+
+    // Simple tensor for the input tile
+    SimpleTensor<T> src_tile{ tile_dims, in.data_type() };
+
+    // Simple tensor for the input tile transposed
+    SimpleTensor<T> src_tile_transposed{ tile_dims_transposed, in.data_type() };
+
+    // Simple tensor to use when storing the values
+    SimpleTensor<T> *tile_to_use = rhs_info.transpose ? &src_tile_transposed : &src_tile;
+
+    // When interleaving, consecutive tile rows of the h0 tiles alternate in the output row
+    const unsigned int offset_output_x = rhs_info.interleave ? tile_to_use->shape()[0] : tile_to_use->shape()[0] * tile_to_use->shape()[1];
+    const unsigned int step_output_x   = rhs_info.interleave ? tile_to_use->shape()[0] * rhs_info.h0 : tile_to_use->shape()[0];
+
+    for(unsigned int z = 0; z < B; ++z)
+    {
+        for(unsigned int y = 0; y < num_tiles_y; ++y)
+        {
+            for(unsigned int x = 0; x < num_tiles_x; ++x)
+            {
+                // Get the tile from the input tensor
+                get_tile(in, src_tile, Coordinates(x * rhs_info.n0, y * rhs_info.k0, z, 0));
+
+                if(rhs_info.transpose)
+                {
+                    // Transpose matrix
+                    transpose_matrix(src_tile, src_tile_transposed);
+                }
+
+                // Store the tile: y selects the block of k0 rows, x % h0 the tile within a group of h0 tiles,
+                // x / h0 the group, and z the batch
+                const unsigned int offset_output = (y * rhs_info.k0 * rhs_info.n0 * rhs_info.h0) + ((x % rhs_info.h0) * offset_output_x) + ((x / rhs_info.h0) * out.shape()[0]) + (z * out.shape()[0] * out.shape()[1]);
+
+                for(unsigned int i = 0; i < tile_to_use->shape()[1]; ++i)
+                {
+                    const unsigned int offset_tile = i * tile_to_use->shape()[0];
+
+                    // Copy per row
+                    std::copy(&(*tile_to_use)[offset_tile], &(*tile_to_use)[offset_tile + tile_to_use->shape()[0]], &out[offset_output + i * step_output_x]);
+                }
+            }
+        }
+    }
+
+    return out;
+}
+template SimpleTensor<int> gemm_reshape_rhs_matrix(const SimpleTensor<int> &in, const TensorShape &output_shape, const GEMMRHSMatrixInfo &rhs_info);
+template SimpleTensor<short> gemm_reshape_rhs_matrix(const SimpleTensor<short> &in, const TensorShape &output_shape, const GEMMRHSMatrixInfo &rhs_info);
+template SimpleTensor<char> gemm_reshape_rhs_matrix(const SimpleTensor<char> &in, const TensorShape &output_shape, const GEMMRHSMatrixInfo &rhs_info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/reference/GEMMReshapeRHSMatrix.h b/tests/validation/reference/GEMMReshapeRHSMatrix.h
new file mode 100644
index 0000000000..8edcfd67fa
--- /dev/null
+++ b/tests/validation/reference/GEMMReshapeRHSMatrix.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_GEMMRESHAPERHSMATRIX_H__
+#define __ARM_COMPUTE_TEST_GEMMRESHAPERHSMATRIX_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> gemm_reshape_rhs_matrix(const SimpleTensor<T> &in, const TensorShape &output_shape, const GEMMRHSMatrixInfo &rhs_info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_GEMMRESHAPERHSMATRIX_H__ */
\ No newline at end of file
--
cgit v1.2.1
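Note for reviewers: the snippet below is not part of the patch. It is a minimal sketch of how the new kernel can be driven on its own on the CL backend, mirroring what the validation fixture above does; it assumes the CLScheduler/CLTensor runtime API already in the library. The tensor shape and the n0/k0/h0/transpose/interleave choices are illustrative only and must respect the constraints documented in CLGEMMReshapeRHSMatrixKernel::configure().

    #include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    int main()
    {
        // Initialise the default OpenCL context and queue used by the CL backend
        CLScheduler::get().default_init();

        // RHS matrix: N = 64 columns, K = 32 rows (illustrative sizes)
        CLTensor rhs;
        rhs.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));

        // Reshape parameters: 4x4 tiles, groups of h0 = 2 tiles, transposed and interleaved
        // (k0 = 1 and k0 = 2 would require transpose = false)
        GEMMRHSMatrixInfo rhs_info;
        rhs_info.n0         = 4;
        rhs_info.k0         = 4;
        rhs_info.h0         = 2;
        rhs_info.transpose  = true;
        rhs_info.interleave = true;

        // The kernel auto-initialises the output tensor info during configure()
        CLTensor rhs_reshaped;
        CLGEMMReshapeRHSMatrixKernel reshape_rhs;
        reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);

        rhs.allocator()->allocate();
        rhs_reshaped.allocator()->allocate();

        // ... fill `rhs` with the RHS matrix data ...

        // Enqueue the kernel on the default queue and wait for completion
        CLScheduler::get().enqueue(reshape_rhs);
        CLScheduler::get().sync();

        return 0;
    }

Within the library the kernel is intended to be scheduled by the GEMM/GEMMLowp functions rather than called directly, so the sketch is only useful for experimenting with the reshape layouts in isolation.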