diff options
author | ramelg01 <ramy.elgammal@arm.com> | 2022-02-08 23:01:31 +0000 |
---|---|---|
committer | Ramy Elgammal <ramy.elgammal@arm.com> | 2022-02-11 11:01:10 +0000 |
commit | 89aa4eb56d56c81a9d53f94dffa5fa88742e986c (patch) | |
tree | 64ac3cb37d44fcfb8cf7add9100a8f0230a51d8f /src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp | |
parent | 2134d1bdb81e4959560d5becea06c43c083a9811 (diff) | |
download | ComputeLibrary-89aa4eb56d56c81a9d53f94dffa5fa88742e986c.tar.gz |
Improve start-up time for concatenation layers
- Pass tensor dimensions at runtime rather than at compile time
- Add guard macro to compile only the kernel of interest
Resolves: COMPMID-5121
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: I76b7c0cf56d803f58ebff5494c904ace2a86ef5a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7097
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp')
-rw-r--r-- | src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp | 22 |
1 files changed, 15 insertions, 7 deletions
diff --git a/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp b/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp index a08490c565..741637795a 100644 --- a/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp +++ b/src/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -93,16 +93,16 @@ void ClWidthConcatenate4TensorsKernel::configure(const CLCompileContext &compile build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src1->data_type())); build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)); build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover)); - build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(src1->dimension(2))); - build_opts.add_option("-DINPUT1_WIDTH=" + support::cpp11::to_string(src1->dimension(0))); - build_opts.add_option("-DINPUT2_WIDTH=" + support::cpp11::to_string(src2->dimension(0))); - build_opts.add_option("-DINPUT3_WIDTH=" + support::cpp11::to_string(src3->dimension(0))); - build_opts.add_option("-DINPUT4_WIDTH=" + support::cpp11::to_string(src4->dimension(0))); build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(src1->element_size())); build_opts.add_option("-DINPUT1_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration)); build_opts.add_option("-DINPUT2_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) + src2->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration)); build_opts.add_option("-DINPUT3_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) + src2->dimension(0) + src3->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration)); + _depth = src1->dimension(2); + _input1_width = src1->dimension(0); + _input2_width = src2->dimension(0); + _input3_width = 
src3->dimension(0); + // If sources have different quantization info set quantization parameters needed for the re-quantization process const bool have_different_qinfo = helpers::tensor_info::tensors_have_different_quantization_info(dst, src1, src2, src3, src4); if(is_data_type_quantized_asymmetric(src1->data_type()) && have_different_qinfo) @@ -124,9 +124,13 @@ void ClWidthConcatenate4TensorsKernel::configure(const CLCompileContext &compile build_opts.add_option("-DOFFSET_OUT=" + float_to_string_with_full_precision(oq_info.offset)); build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale)); } + std::string kernel_name = "concatenate_width_x4"; + + // A macro guard to compile ONLY the kernel of interest + build_opts.add_option("-D" + upper_string(kernel_name)); // Create kernel - _kernel = create_kernel(compile_context, "concatenate_width_x4", build_opts.options()); + _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); // Configure kernel window Window win = calculate_max_window(*dst, Steps(num_elems_processed_per_iteration)); @@ -176,6 +180,10 @@ void ClWidthConcatenate4TensorsKernel::run_op(ITensorPack &tensors, const Window add_4D_tensor_argument(idx, src2, slice); add_4D_tensor_argument(idx, src3, slice); add_4D_tensor_argument(idx, dst, slice); + _kernel.setArg<cl_int>(idx++, _depth); + _kernel.setArg<cl_int>(idx++, _input1_width); + _kernel.setArg<cl_int>(idx++, _input2_width); + _kernel.setArg<cl_int>(idx++, _input3_width); enqueue(queue, *this, window, lws_hint()); } while(window.slide_window_slice_4D(slice)); |