diff options
author | ramelg01 <ramy.elgammal@arm.com> | 2022-02-08 23:01:31 +0000 |
---|---|---|
committer | Ramy Elgammal <ramy.elgammal@arm.com> | 2022-02-11 11:01:10 +0000 |
commit | 89aa4eb56d56c81a9d53f94dffa5fa88742e986c (patch) | |
tree | 64ac3cb37d44fcfb8cf7add9100a8f0230a51d8f /src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp | |
parent | 2134d1bdb81e4959560d5becea06c43c083a9811 (diff) | |
download | ComputeLibrary-89aa4eb56d56c81a9d53f94dffa5fa88742e986c.tar.gz |
Improve start-up time for concatenation layers
- Pass tensor dimensions at runtime rather than at compile time
- Add guard macro to compile only the kernel of interest
Resolves: COMPMID-5121
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: I76b7c0cf56d803f58ebff5494c904ace2a86ef5a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7097
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp')
-rw-r--r-- | src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp | 17 |
1 files changed, 12 insertions, 5 deletions
diff --git a/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp b/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp index 6e7b7f6e14..b04a80a1e9 100644 --- a/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp +++ b/src/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021 Arm Limited. + * Copyright (c) 2018-2022 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -89,9 +89,6 @@ void ClWidthConcatenate2TensorsKernel::configure(const CLCompileContext &compile build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src1->data_type())); build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)); build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover)); - build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(src1->dimension(2))); - build_opts.add_option("-DINPUT1_WIDTH=" + support::cpp11::to_string(src1->dimension(0))); - build_opts.add_option("-DINPUT2_WIDTH=" + support::cpp11::to_string(src2->dimension(0))); build_opts.add_option("-DELEMENT_SIZE=" + support::cpp11::to_string(src1->element_size())); build_opts.add_option("-DINPUT1_ROTATE_N=" + support::cpp11::to_string((src1->dimension(0) - vec_size_leftover) % num_elems_processed_per_iteration)); @@ -111,8 +108,16 @@ void ClWidthConcatenate2TensorsKernel::configure(const CLCompileContext &compile build_opts.add_option("-DSCALE_OUT=" + float_to_string_with_full_precision(oq_info.scale)); } + _depth = src1->dimension(2); + _input1_width = src1->dimension(0); + + std::string kernel_name = "concatenate_width_x2"; + + // A macro guard to compile ONLY the kernel of interest + build_opts.add_option("-D" + upper_string(kernel_name)); + // Create kernel - _kernel = create_kernel(compile_context, "concatenate_width_x2", build_opts.options()); + _kernel = create_kernel(compile_context, kernel_name, build_opts.options()); // Configure kernel window Window win = 
calculate_max_window(*dst, Steps(num_elems_processed_per_iteration)); @@ -150,6 +155,8 @@ void ClWidthConcatenate2TensorsKernel::run_op(ITensorPack &tensors, const Window add_4D_tensor_argument(idx, src0, slice); add_4D_tensor_argument(idx, src1, slice); add_4D_tensor_argument(idx, dst, slice); + _kernel.setArg<cl_int>(idx++, _depth); + _kernel.setArg<cl_int>(idx++, _input1_width); enqueue(queue, *this, window, lws_hint()); } while(window.slide_window_slice_4D(slice)); |