From 9f89baebb81e6a01ec06fe916564da45eb204f34 Mon Sep 17 00:00:00 2001
From: Gian Marco Iodice
Date: Thu, 22 Jun 2017 12:09:49 +0100
Subject: COMPMID-411 - Ported CLGEMMInterleave4x4Kernel and CLGEMMTranspose1xWKernel to support 8 bit fixed point

Change-Id: If236c9047ed536e808a0ed26e97e1799ca938e03
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78529
Tested-by: Kaizen
Reviewed-by: Moritz Pflanzer
Reviewed-by: Georgios Pinitas
---
 src/core/CL/CLKernelLibrary.cpp                    |  6 +++---
 src/core/CL/cl_kernels/gemm.cl                     | 20 ++++++++++----------
 src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp  | 16 ++++++++++++----
 src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp   | 13 ++++++-------
 src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp |  1 +
 5 files changed, 32 insertions(+), 24 deletions(-)

(limited to 'src/core')

diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 3070d4817e..45a247db1a 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -165,9 +165,9 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
     { "gemm_vm_f16", "gemm.cl" },
     { "gemm_vm_f32", "gemm.cl" },
     { "gemm_lc_vm_f32", "gemm.cl" },
-    { "gemm_transpose1x16_u8", "gemm.cl" },
-    { "gemm_transpose1x8_f16", "gemm.cl" },
-    { "gemm_transpose1x4_f32", "gemm.cl" },
+    { "gemm_transpose1x16", "gemm.cl" },
+    { "gemm_transpose1x8", "gemm.cl" },
+    { "gemm_transpose1x4", "gemm.cl" },
     { "harris_score_3x3", "harris_corners.cl" },
     { "harris_score_5x5", "harris_corners.cl" },
     { "harris_score_7x7", "harris_corners.cl" },
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index caf6e3ffd8..d80b5262a7 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -38,8 +38,8 @@
  * @param[in] dst_step_y                        dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
  */
-__kernel void gemm_transpose1x4_f32(IMAGE_DECLARATION(src),
-                                    IMAGE_DECLARATION(dst))
+__kernel void gemm_transpose1x4(IMAGE_DECLARATION(src),
+                                IMAGE_DECLARATION(dst))
 {
     uint x = get_global_id(0);
     uint y = get_global_id(1);
@@ -50,9 +50,9 @@ __kernel void gemm_transpose1x4_f32(IMAGE_DECLARATION(src),
     /* Compute address for Matrix B transposed - destination. X and Y are swapped */
     uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
 
-    float4 b0 = vload4(0, (__global float *)src.ptr);
+    uint4 b0 = vload4(0, (__global uint *)src.ptr);
 
-    vstore4(b0, 0, (__global float *)(dst_ptr + dst_addr_in_bytes));
+    vstore4(b0, 0, (__global uint *)(dst_ptr + dst_addr_in_bytes));
 }
 
 /** This OpenCL kernel computes the "vector" 1x8 transposition of input matrix
@@ -70,8 +70,8 @@ __kernel void gemm_transpose1x4_f32(IMAGE_DECLARATION(src),
  * @param[in] dst_step_y                        dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
  */
-__kernel void gemm_transpose1x8_f16(IMAGE_DECLARATION(src),
-                                    IMAGE_DECLARATION(dst))
+__kernel void gemm_transpose1x8(IMAGE_DECLARATION(src),
+                                IMAGE_DECLARATION(dst))
 {
     uint x = get_global_id(0);
     uint y = get_global_id(1);
@@ -82,9 +82,9 @@ __kernel void gemm_transpose1x8_f16(IMAGE_DECLARATION(src),
     /* Compute address for Matrix B transposed - destination. X and Y are swapped */
     uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
 
-    half8 b0 = vload8(0, (__global half *)src.ptr);
+    ushort8 b0 = vload8(0, (__global ushort *)src.ptr);
 
-    vstore8(b0, 0, (__global half *)(dst_ptr + dst_addr_in_bytes));
+    vstore8(b0, 0, (__global ushort *)(dst_ptr + dst_addr_in_bytes));
 }
 
 /** This OpenCL kernel computes the "vector" 1x16 transposition of input matrix
@@ -102,8 +102,8 @@ __kernel void gemm_transpose1x8_f16(IMAGE_DECLARATION(src),
  * @param[in] dst_step_y                        dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
  */
-__kernel void gemm_transpose1x16_u8(IMAGE_DECLARATION(src),
-                                    IMAGE_DECLARATION(dst))
+__kernel void gemm_transpose1x16(IMAGE_DECLARATION(src),
+                                 IMAGE_DECLARATION(dst))
 {
     uint x = get_global_id(0);
     uint y = get_global_id(1);
diff --git a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
index 71d42c5606..7312cc25cb 100644
--- a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
+++ b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
@@ -43,11 +43,19 @@ CLGEMMInterleave4x4Kernel::CLGEMMInterleave4x4Kernel()
 
 void CLGEMMInterleave4x4Kernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+
+    TensorShape output_shape = input->info()->tensor_shape();
+    output_shape.set(0, input->info()->dimension(0) * 4);
+    output_shape.set(1, std::ceil(input->info()->dimension(1) / 4.0f));
+
+    // Output auto inizialitation if not yet initialized
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-    ARM_COMPUTE_ERROR_ON(output->info()->dimension(0) != input->info()->dimension(0) * 4);
-    ARM_COMPUTE_ERROR_ON(output->info()->dimension(1) != std::ceil(static_cast<float>(input->info()->dimension(1)) / 4.0f));
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
 
     _input  = input;
     _output = output;
diff --git a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
index 4067280bf0..0ef02f8a46 100644
--- a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
@@ -40,8 +40,8 @@ using namespace arm_compute;
 
 void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
 
     TensorShape  output_shape{ input->info()->tensor_shape() };
     const size_t transpose_w = 16 / input->info()->element_size();
@@ -53,6 +53,7 @@ void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *outp
 
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
 
     const unsigned int num_elems_processed_per_iteration = max_cl_vector_width / data_size_from_type(input->info()->data_type());
     const float        scale_x                           = num_elems_processed_per_iteration;
@@ -69,13 +70,11 @@ void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *outp
      * |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 |
      * |a30 a31 a32 a33|
      *
-     * If the input data type is F32, the output matrix will have the following shape: [ height * 4, width / 4 ]
-     * If the input data type is F16, the output matrix will have the following shape: [ height * 8, width / 8 ]
+     * The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
      */
     // Create kernel
-    std::string data_type_name = lower_string(string_from_data_type(input->info()->data_type()));
-    std::string kernel_name    = "gemm_transpose1x" + val_to_string(num_elems_processed_per_iteration) + "_" + data_type_name;
-    _kernel                    = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+    std::string kernel_name = "gemm_transpose1x" + val_to_string(num_elems_processed_per_iteration);
+    _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
 
     // Configure window
     Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
index 38e7eaee78..f6cf2d1f8d 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
@@ -56,6 +56,7 @@ void NEGEMMTranspose1xWKernel::configure(const ITensor *input, ITensor *output)
 
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
 
     const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
     const float        scale_x                           = num_elems_processed_per_iteration;
-- 
cgit v1.2.1
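
The arithmetic behind the kernel rename is easy to check by hand: each transpose kernel moves one 16-byte vector per work item, so W is simply 16 divided by the element size, and F32 maps to gemm_transpose1x4, F16 to gemm_transpose1x8, while the 8-bit types (U8, S8 and the new QS8) all share gemm_transpose1x16. The snippet below is a hypothetical, standalone C++ sketch (not Compute Library code); it only evaluates the shape formula quoted in the updated comment, [ height * W, ceil(width / W) ], and the helper name and example dimensions are invented for illustration.

#include <cmath>
#include <cstddef>
#include <cstdio>

// Evaluate the 1xW transpose output shape from the updated comment:
// [ height * W, ceil(width / W) ], where W = 16 / element size.
// Dimensions are in elements; "width" is dimension 0, "height" is dimension 1.
static void print_transpose1xw_shape(const char *type, std::size_t element_size,
                                     std::size_t width, std::size_t height)
{
    const std::size_t w     = 16 / element_size; // elements per 16-byte vector
    const std::size_t out_w = height * w;
    const std::size_t out_h = static_cast<std::size_t>(std::ceil(width / static_cast<float>(w)));
    std::printf("%-3s -> gemm_transpose1x%-2zu : %zux%zu becomes %zux%zu\n",
                type, w, width, height, out_w, out_h);
}

int main()
{
    // Same input matrix, three element sizes: one generic kernel per W now
    // covers every data type of that size (U8, S8 and QS8 all use W = 16).
    print_transpose1xw_shape("F32", 4, 64, 32);
    print_transpose1xw_shape("F16", 2, 64, 32);
    print_transpose1xw_shape("QS8", 1, 64, 32);
    return 0;
}

Because the kernels only shuffle 16-byte blocks, reinterpreting float4 as uint4 and half8 as ushort8, as the patch does in gemm.cl, leaves the produced layout unchanged, which is why a single kernel per W can serve every data type of that element size.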