author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2017-06-22 12:09:49 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-09-17 14:14:20 +0100
commit     9f89baebb81e6a01ec06fe916564da45eb204f34 (patch)
tree       edd21a058a2701f0e89456717e10011ab44d473a /src/core/CL
parent     10c672c2e21bb77b7234d9d3611267400dce7ae0 (diff)
download   ComputeLibrary-9f89baebb81e6a01ec06fe916564da45eb204f34.tar.gz
COMPMID-411 - Ported CLGEMMInterleave4x4Kernel and CLGEMMTranspose1xWKernel to support 8 bit fixed point
Change-Id: If236c9047ed536e808a0ed26e97e1799ca938e03
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78529
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Moritz Pflanzer <moritz.pflanzer@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/core/CL')
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                     6
-rw-r--r--  src/core/CL/cl_kernels/gemm.cl                     20
-rw-r--r--  src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp  16
-rw-r--r--  src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp   13
4 files changed, 31 insertions, 24 deletions
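
The net effect of the diff below is that the 1xW transpose kernels are selected by element size alone rather than by data type. As a rough illustration only (the helper below is hypothetical and not part of the library), the selection reduces to:

#include <cstddef>
#include <string>

// Hypothetical sketch: W is the number of elements that fit in one 16-byte
// transposed block, so 1-byte types (U8/S8/QS8) map to gemm_transpose1x16,
// 2-byte types (F16/U16/S16) to gemm_transpose1x8 and 4-byte types
// (F32/U32/S32) to gemm_transpose1x4.
std::string select_transpose_kernel(std::size_t element_size_in_bytes)
{
    const std::size_t w = 16 / element_size_in_bytes;
    return "gemm_transpose1x" + std::to_string(w);
}
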
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 3070d4817e..45a247db1a 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -165,9 +165,9 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "gemm_vm_f16", "gemm.cl" },
{ "gemm_vm_f32", "gemm.cl" },
{ "gemm_lc_vm_f32", "gemm.cl" },
- { "gemm_transpose1x16_u8", "gemm.cl" },
- { "gemm_transpose1x8_f16", "gemm.cl" },
- { "gemm_transpose1x4_f32", "gemm.cl" },
+ { "gemm_transpose1x16", "gemm.cl" },
+ { "gemm_transpose1x8", "gemm.cl" },
+ { "gemm_transpose1x4", "gemm.cl" },
{ "harris_score_3x3", "harris_corners.cl" },
{ "harris_score_5x5", "harris_corners.cl" },
{ "harris_score_7x7", "harris_corners.cl" },
diff --git a/src/core/CL/cl_kernels/gemm.cl b/src/core/CL/cl_kernels/gemm.cl
index caf6e3ffd8..d80b5262a7 100644
--- a/src/core/CL/cl_kernels/gemm.cl
+++ b/src/core/CL/cl_kernels/gemm.cl
@@ -38,8 +38,8 @@
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_transpose1x4_f32(IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_transpose1x4(IMAGE_DECLARATION(src),
+ IMAGE_DECLARATION(dst))
{
uint x = get_global_id(0);
uint y = get_global_id(1);
@@ -50,9 +50,9 @@ __kernel void gemm_transpose1x4_f32(IMAGE_DECLARATION(src),
/* Compute address for Matrix B transposed - destination. X and Y are swapped */
uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
- float4 b0 = vload4(0, (__global float *)src.ptr);
+ uint4 b0 = vload4(0, (__global uint *)src.ptr);
- vstore4(b0, 0, (__global float *)(dst_ptr + dst_addr_in_bytes));
+ vstore4(b0, 0, (__global uint *)(dst_ptr + dst_addr_in_bytes));
}
/** This OpenCL kernel computes the "vector" 1x8 transposition of input matrix
@@ -70,8 +70,8 @@ __kernel void gemm_transpose1x4_f32(IMAGE_DECLARATION(src),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_transpose1x8_f16(IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_transpose1x8(IMAGE_DECLARATION(src),
+ IMAGE_DECLARATION(dst))
{
uint x = get_global_id(0);
uint y = get_global_id(1);
@@ -82,9 +82,9 @@ __kernel void gemm_transpose1x8_f16(IMAGE_DECLARATION(src),
/* Compute address for Matrix B transposed - destination. X and Y are swapped */
uint dst_addr_in_bytes = y * 16 + ((x * dst_stride_y + dst_offset_first_element_in_bytes));
- half8 b0 = vload8(0, (__global half *)src.ptr);
+ ushort8 b0 = vload8(0, (__global ushort *)src.ptr);
- vstore8(b0, 0, (__global half *)(dst_ptr + dst_addr_in_bytes));
+ vstore8(b0, 0, (__global ushort *)(dst_ptr + dst_addr_in_bytes));
}
/** This OpenCL kernel computes the "vector" 1x16 transposition of input matrix
@@ -102,8 +102,8 @@ __kernel void gemm_transpose1x8_f16(IMAGE_DECLARATION(src),
* @param[in] dst_step_y dst_gx_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
-__kernel void gemm_transpose1x16_u8(IMAGE_DECLARATION(src),
- IMAGE_DECLARATION(dst))
+__kernel void gemm_transpose1x16(IMAGE_DECLARATION(src),
+ IMAGE_DECLARATION(dst))
{
uint x = get_global_id(0);
uint y = get_global_id(1);
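
The kernel changes above replace float/half loads with uint/ushort loads because the 1xW transposition never does arithmetic on the values; it only moves bytes. Reinterpreting the data through an unsigned integer of the same width keeps the bit pattern intact, which is why one kernel can now serve every type of a given element size, including the new QS8 fixed-point format. A minimal host-side C++ illustration of that idea (not the OpenCL code itself):

#include <cassert>
#include <cstdint>
#include <cstring>

int main()
{
    // Round-tripping a float through a 32-bit unsigned integer copies the
    // bits verbatim, which is all a pure transposition/copy kernel needs.
    const float src = -1.5f;
    uint32_t    bits;
    std::memcpy(&bits, &src, sizeof(bits)); // "vload" as a raw 32-bit word
    float dst;
    std::memcpy(&dst, &bits, sizeof(dst));  // "vstore" the same word back
    assert(dst == src);
    return 0;
}
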
diff --git a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
index 71d42c5606..7312cc25cb 100644
--- a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
+++ b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
@@ -43,11 +43,19 @@ CLGEMMInterleave4x4Kernel::CLGEMMInterleave4x4Kernel()
void CLGEMMInterleave4x4Kernel::configure(const ICLTensor *input, ICLTensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::S8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+
+ TensorShape output_shape = input->info()->tensor_shape();
+ output_shape.set(0, input->info()->dimension(0) * 4);
+ output_shape.set(1, std::ceil(input->info()->dimension(1) / 4.0f));
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_ERROR_ON(output->info()->dimension(0) != input->info()->dimension(0) * 4);
- ARM_COMPUTE_ERROR_ON(output->info()->dimension(1) != std::ceil(static_cast<float>(input->info()->dimension(1)) / 4.0f));
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
_input = input;
_output = output;
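
With the output now auto-initialised, its shape follows directly from the input: the width is multiplied by 4 and the height divided by 4, rounded up. The arithmetic in standalone form, purely for illustration (the helper name is made up):

#include <cmath>
#include <cstddef>
#include <utility>

// Shape of the 4x4-interleaved output for a width x height input,
// mirroring the auto-initialisation added in configure() above.
std::pair<std::size_t, std::size_t> interleave4x4_output_shape(std::size_t width, std::size_t height)
{
    return { width * 4, static_cast<std::size_t>(std::ceil(height / 4.0f)) };
}

// Example: a 61 x 23 input produces a 244 x 6 output (ceil(23 / 4) = 6).
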
diff --git a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
index 4067280bf0..0ef02f8a46 100644
--- a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
@@ -40,8 +40,8 @@ using namespace arm_compute;
void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *output)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON(output == nullptr);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
TensorShape output_shape{ input->info()->tensor_shape() };
const size_t transpose_w = 16 / input->info()->element_size();
@@ -53,6 +53,7 @@ void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *outp
ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->info()->tensor_shape(), output_shape);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
const unsigned int num_elems_processed_per_iteration = max_cl_vector_width / data_size_from_type(input->info()->data_type());
const float scale_x = num_elems_processed_per_iteration;
@@ -69,13 +70,11 @@ void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *outp
* |a20 a21 a22 a23| = | a00 a01 a02 a03 || a10 a11 a12 a13 || a20 a21 a22 a23 || a30 a31 a32 a33 |
* |a30 a31 a32 a33|
*
- * If the input data type is F32, the output matrix will have the following shape: [ height * 4, width / 4 ]
- * If the input data type is F16, the output matrix will have the following shape: [ height * 8, width / 8 ]
+ * The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
*/
// Create kernel
- std::string data_type_name = lower_string(string_from_data_type(input->info()->data_type()));
- std::string kernel_name = "gemm_transpose1x" + val_to_string(num_elems_processed_per_iteration) + "_" + data_type_name;
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
+ std::string kernel_name = "gemm_transpose1x" + val_to_string(num_elems_processed_per_iteration);
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
// Configure window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
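
For the transpose kernel, both the kernel name and the output shape follow from W = 16 / element size, giving the documented shape [ height * W, ceil(width / W) ]. A small sketch of how that works out for the three supported element sizes (illustrative values only, not library code):

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <initializer_list>

int main()
{
    const std::size_t width = 24, height = 10;
    // 4-byte (F32/U32/S32), 2-byte (F16/U16/S16) and 1-byte (U8/S8/QS8) elements
    for (std::size_t element_size : { std::size_t(4), std::size_t(2), std::size_t(1) })
    {
        const std::size_t w = 16 / element_size; // elements per 16-byte block
        std::printf("element size %zu -> W=%zu -> output [ %zu x %zu ]\n",
                    element_size, w, height * w,
                    static_cast<std::size_t>(std::ceil(width / static_cast<float>(w))));
    }
    return 0;
}
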