commit    29254aeb11a76c86449c2f38587e9144b2f2aacb (patch)
author    Viet-Hoa Do <viet-hoa.do@arm.com>  2023-10-13 17:40:32 +0100
committer Viet-Hoa Do <viet-hoa.do@arm.com>  2023-10-31 10:16:25 +0000
tree      ca2df26e81c2417b34768ac325e0f7200b5265df /src/gpu/cl/kernels
parent    e5362e7e5dbccf81c5296a7e77154e11e1a14d2f (diff)
Optimize CL softmax
* The new softmax implementation consists of only a single kernel.
  - There are 2 versions of softmax: one for the x dimension
    and one for any other dimension.
  - The softmax kernel handles both native and quantized data types
    (see the sketches below).
Resolves: COMPMID-6447
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I4a9ae5bc63f78aebeaa85ee48a0d102c9c245eda
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10489
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
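
A note on the quantized path: the patch keeps the long-standing contract, stated in the removed 1D-norm code, that quantized softmax output carries a fixed quantization (scale 1/256, offset 0 for non-log QASYMM8); the new ClSoftmaxKernel::validate() checks the destination against get_softmax_output_quantization_info(). A minimal caller-side sketch under that assumption; the shape and the input scale/offset below are made up purely for illustration:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"

    using namespace arm_compute;

    void make_quantized_softmax_infos(TensorInfo &src_info, TensorInfo &dst_info)
    {
        // Hypothetical 128-logit rows, batch of 32.
        const TensorShape shape(128U, 32U);

        // Input: arbitrary asymmetric quantization (values chosen for illustration).
        src_info = TensorInfo(shape, 1, DataType::QASYMM8);
        src_info.set_quantization_info(QuantizationInfo(0.05f, 10));

        // Output: fixed softmax quantization, scale 1/256 and offset 0 for the
        // non-log QASYMM8 case, per the comment retained from the old implementation.
        dst_info = TensorInfo(shape, 1, DataType::QASYMM8);
        dst_info.set_quantization_info(QuantizationInfo(1.f / 256.f, 0));
    }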
Diffstat (limited to 'src/gpu/cl/kernels')
-rw-r--r-- | src/gpu/cl/kernels/ClSoftmaxKernel.cpp | 462
-rw-r--r-- | src/gpu/cl/kernels/ClSoftmaxKernel.h   | 103
2 files changed, 200 insertions, 365 deletions
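
For context on how this single kernel is reached: softmax is normally invoked through the public CLSoftmaxLayer runtime function. A minimal, hedged usage sketch, assuming the standard CL runtime setup (shapes are illustrative; with axis 0 the new softmax_x variant is the one selected):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    using namespace arm_compute;

    int main()
    {
        // One-time OpenCL setup (device, context, command queue).
        CLScheduler::get().default_init();

        // 32 rows of 128 logits each; softmax reduces along dimension 0 (x).
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));

        CLSoftmaxLayer softmax;
        softmax.configure(&src, &dst, /* beta */ 1.0f, /* axis */ 0);

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src with logits ...

        softmax.run();
        CLScheduler::get().sync();
        return 0;
    }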
diff --git a/src/gpu/cl/kernels/ClSoftmaxKernel.cpp b/src/gpu/cl/kernels/ClSoftmaxKernel.cpp
index 1b5a2666bc..796345a923 100644
--- a/src/gpu/cl/kernels/ClSoftmaxKernel.cpp
+++ b/src/gpu/cl/kernels/ClSoftmaxKernel.cpp
@@ -23,361 +23,241 @@
  */
 #include "src/gpu/cl/kernels/ClSoftmaxKernel.h"
 
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/CL/CLHelpers.h"
 #include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/CoreTypes.h"
+#include "arm_compute/core/Dimensions.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/core/ITensorPack.h"
+#include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/Steps.h"
+#include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/DataTypeUtils.h"
 #include "arm_compute/core/utils/helpers/AdjustVecSize.h"
-#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/core/utils/StringUtils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
 
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
 #include "support/Cast.h"
 #include "support/StringSupport.h"
 
+#include <string>
+
 namespace arm_compute
 {
 namespace opencl
 {
 namespace kernels
 {
-namespace
-{
-/** Calculates softmax parameters from the quantized input scale and scaling factor for the exponent and places them as build options.
- *
- * Prepares these build options:
- * -INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT - quantized representation of beta multiplier.
- * -DIFF_MIN - threshold difference between maximum value of input data and current processed value,
- *             it defines whether the value will be taken into account or not.
- *
- * @param[in] build_opts  Build options to extend
- * @param[in] input_scale Input scaling factor
- * @param[in] beta        Exponent scaling factor beta
- */
-CLBuildOptions prepare_quantized_softmax_build_options(float input_scale, float beta)
-{
-    // Number of integer bits in temporary fixed-point representation of current-to-max difference
-    static const int scaled_diff_int_bits = 5;
-    // Number of integer bits used in temporary fixed-point representation of exponent accumulator
-    static const int exp_accumulation_in_bits = 12;
-
-    const double beta_multiplier =
-        std::min(1.0 * beta * input_scale * (1 << (31 - scaled_diff_int_bits)), (1LL << 31) - 1.0);
-    int input_beta_multiplier;
-    int input_beta_left_shift;
-    quantization::calculate_quantized_multiplier_greater_than_one(beta_multiplier, &input_beta_multiplier,
-                                                                  &input_beta_left_shift);
-
-    const double max_input_rescaled =
-        1.0 * ((1 << scaled_diff_int_bits) - 1) * (1LL << (31 - scaled_diff_int_bits)) / (1LL << input_beta_left_shift);
-    const int diff_min = -1.f * std::floor(max_input_rescaled);
-
-    CLBuildOptions build_opts;
-    build_opts.add_option("-DSCALED_DIFF_INT_BITS=" + support::cpp11::to_string(scaled_diff_int_bits));
-    build_opts.add_option("-DEXP_ACCUMULATION_INT_BITS=" + support::cpp11::to_string(exp_accumulation_in_bits));
-    build_opts.add_option("-DINPUT_BETA_MULTIPLIER=" + support::cpp11::to_string(input_beta_multiplier));
-    build_opts.add_option("-DINPUT_BETA_LEFT_SHIFT=" + support::cpp11::to_string(input_beta_left_shift));
-    build_opts.add_option("-DDIFF_MIN=" + support::cpp11::to_string(diff_min));
-
-    return build_opts;
+ClSoftmaxKernel::ClSoftmaxKernel()
+{
 }
 
-Status validate_arguments_1DMaxShiftExpSum(const ITensorInfo &src,
-                                           const ITensorInfo &max,
-                                           const ITensorInfo &dst,
-                                           const ITensorInfo &sum)
+Status ClSoftmaxKernel::validate(const ITensorInfo &src, const ITensorInfo &dst, const SoftmaxKernelInfo &info)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
-                                                         DataType::F16, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &max);
+    ARM_COMPUTE_UNUSED(src, dst, info);
 
-    const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src.data_type());
+    ARM_COMPUTE_RETURN_ERROR_ON(src.num_dimensions() > 4);
 
-    // Checks performed when output is configured
-    if (dst.total_size() != 0)
-    {
-        if (is_quantized_asymmetric)
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::S32);
-        }
-        else
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
-        }
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
-    }
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
 
-    // Checks performed when sum is configured
-    if (sum.total_size() != 0)
-    {
-        if (is_quantized_asymmetric)
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&sum, 1, DataType::S32);
-        }
-        else
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&max, &sum);
-        }
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&max, &sum);
-    }
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN( //
+        &src, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
 
-    return Status{};
-}
+    ARM_COMPUTE_RETURN_ERROR_ON(info.input_data_type != src.data_type());
+    ARM_COMPUTE_RETURN_ERROR_ON(info.axis < static_cast<int32_t>(-src.num_dimensions()) ||
+                                static_cast<int32_t>(src.num_dimensions()) <= info.axis);
 
-Status validate_arguments_1DNorm(const ITensorInfo       &src,
-                                 const ITensorInfo       &sum,
-                                 const ITensorInfo       &dst,
-                                 const SoftmaxKernelInfo &info)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::S32, DataType::F16, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &sum);
-    ARM_COMPUTE_RETURN_ERROR_ON(info.is_log && !is_data_type_float(info.input_data_type));
-
-    // Note: output should always have a scale of 1/256 and offset 0
-    const QuantizationInfo allowed_quantization_info =
-        get_softmax_output_quantization_info(info.input_data_type, info.is_log);
-    const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(info.input_data_type);
-
-    // Checks performed when output is configured
-    if (dst.total_size() != 0)
+    if (is_data_type_quantized_asymmetric(src.data_type()))
     {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
-        if (!is_quantized_asymmetric)
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
-        }
-        else
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
-            ARM_COMPUTE_RETURN_ERROR_ON(dst.quantization_info() != allowed_quantization_info);
-        }
+        ARM_COMPUTE_RETURN_ERROR_ON(src.quantization_info().uniform().scale < 0);
+
+        ARM_COMPUTE_RETURN_ERROR_ON(dst.quantization_info() !=
+                                    get_softmax_output_quantization_info(src.data_type(), info.is_log));
     }
 
     return Status{};
 }
-} // namespace
-
-/**< Grid size (obtained through auto-tuning) */
-const unsigned int ClLogits1DMaxShiftExpSumKernel::_grid_size = 64;
-/**< Vector size in the serial case (obtained through auto-tuning) */
-const unsigned int ClLogits1DMaxShiftExpSumKernel::_serial_vector_size = 8;
-/**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost) .*/
-const unsigned int ClLogits1DMaxShiftExpSumKernel::_parallel_vector_size = 4;
-
-ClLogits1DMaxShiftExpSumKernel::ClLogits1DMaxShiftExpSumKernel()
-{
-    _type = CLKernelType::ELEMENTWISE;
-}
 
-void ClLogits1DMaxShiftExpSumKernel::configure(const CLCompileContext  &compile_context,
-                                               const ITensorInfo       &src,
-                                               ITensorInfo             &max,
-                                               ITensorInfo             &dst,
-                                               ITensorInfo             &sum,
-                                               const SoftmaxKernelInfo &info)
+void ClSoftmaxKernel::configure(const CLCompileContext  &compile_context,
+                                const ITensorInfo       &src,
+                                ITensorInfo             &dst,
+                                const SoftmaxKernelInfo &info)
 {
-    auto padding_info = get_padding_info({&src, &max, &dst, &sum});
+    ARM_COMPUTE_UNUSED(compile_context, src, dst, info);
 
-    // Output auto initialization if not yet initialized
-    auto_init_if_empty(sum, src.clone()->set_tensor_shape(max.tensor_shape()));
-    auto_init_if_empty(dst, *src.clone());
+    const auto &dst_shape = dst.tensor_shape();
 
-    // Perform validation step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DMaxShiftExpSum(src, max, dst, sum));
+    const auto data_type    = src.data_type();
+    const auto element_size = src.element_size();
 
-    const DataType                dt                 = src.data_type();
-    const UniformQuantizationInfo qinfo              = src.quantization_info().uniform();
-    const size_t                  reduction_dim_size = src.dimension(0);
-    const float                   beta               = info.beta;
-    const auto                    is_signed_qasymm8  = is_data_type_quantized_asymmetric_signed(info.input_data_type);
-    const int                     min_value          = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
+    const auto is_quantized = data_type == DataType::QASYMM8 || data_type == DataType::QASYMM8_SIGNED;
+    const auto src_qinfo    = src.quantization_info().uniform();
+    const auto dst_qinfo    = dst.quantization_info().uniform();
 
-    const unsigned int vector_size = adjust_vec_size(_serial_vector_size, reduction_dim_size);
+    const auto axis   = wrap_around(info.axis, static_cast<int32_t>(src.num_dimensions()));
+    const auto length = dst_shape[axis];
 
-    // Set build options
-    CLBuildOptions build_opts;
-    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
-    build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
-    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
-    build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(reduction_dim_size));
-    build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(reduction_dim_size % vector_size));
-    build_opts.add_option("-DLOG_VECTOR_SIZE=" + support::cpp11::to_string(lround(log2(vector_size))));
-    build_opts.add_option_if((reduction_dim_size % vector_size) != 0, "-DNON_MULTIPLE_OF_VECTOR_SIZE");
-    build_opts.add_option_if(is_signed_qasymm8, "-DQASYMM8_SIGNED");
-    build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f),
-                             "-DBETA=" + float_to_string_with_full_precision(beta));
-    build_opts.add_option_if(is_data_type_float(dt) && info.is_log, "-DLOG_SOFTMAX");
-    build_opts.add_option_if(is_data_type_float(dt), "-DMINVAL=" + ((dt == DataType::F16) ? std::string("-HALF_MAX")
-                                                                                          : std::string("-FLT_MAX")));
-    build_opts.add_option_if(is_data_type_quantized_asymmetric(dt),
-                             "-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
-    build_opts.add_option_if(is_data_type_quantized_asymmetric(dt),
-                             "-DBETA=" + float_to_string_with_full_precision(beta));
-    build_opts.add_options_if(is_data_type_quantized_asymmetric(dt),
-                              prepare_quantized_softmax_build_options(qinfo.scale, beta).options());
-
-    cl::NDRange lws_hint(cl::NullRange);
-    std::string kernel_name = std::string("softmax_layer_max_shift_exp_sum_") +
-                              (is_data_type_quantized_asymmetric(dt) ? "quantized_" : "") + "serial";
-
-    // Create kernel.
-    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+    const auto tmp_data_type = is_quantized ? DataType::F32 : data_type;
 
-    // Configure window
-    Window win = calculate_max_window(src, Steps(reduction_dim_size));
-    IClKernel::configure_internal(win, lws_hint);
+    const auto vec_size          = adjust_vec_size(16 / element_size, dst_shape[0]);
+    const auto vec_size_leftover = dst_shape[0] % vec_size;
 
-    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
+    std::string    kernel_name("softmax");
+    CLBuildOptions build_opts;
 
-Status ClLogits1DMaxShiftExpSumKernel::validate(const ITensorInfo &src,
-                                                const ITensorInfo &max,
-                                                const ITensorInfo &dst,
-                                                const ITensorInfo &sum)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DMaxShiftExpSum(src, max, dst, sum));
-    return Status{};
-}
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
+    build_opts.add_option("-DTMP_DATA_TYPE=" + get_cl_type_from_data_type(tmp_data_type));
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size));
+    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_leftover));
+    build_opts.add_option("-DLENGTH=" + support::cpp11::to_string(length));
+    build_opts.add_option_if(info.is_log, "-DIS_LOG");
+    build_opts.add_option("-DBETA=" + float_to_string_with_full_precision(info.beta));
+
+    build_opts.add_option_if(is_quantized, "-DIS_QUANTIZED");
+    build_opts.add_option_if(is_quantized, "-DSRC_OFFSET=" + float_to_string_with_full_precision(src_qinfo.offset));
+    build_opts.add_option_if(is_quantized, "-DSRC_SCALE=" + float_to_string_with_full_precision(src_qinfo.scale));
+    build_opts.add_option_if(is_quantized, "-DDST_OFFSET=" + float_to_string_with_full_precision(dst_qinfo.offset));
+    build_opts.add_option_if(is_quantized, "-DDST_SCALE=" + float_to_string_with_full_precision(dst_qinfo.scale));
+
+    if (axis == 0)
+    {
+        kernel_name += "_x";
+        build_opts.add_option("-DSOFTMAX_X");
 
-ClLogits1DMaxShiftExpSumKernel::ParallelReductionInfo ClLogits1DMaxShiftExpSumKernel::is_parallel_reduction(size_t size)
-{
-    bool         is_parallel_reduction = (size >= (_grid_size * _serial_vector_size)) && (_grid_size > 1);
-    unsigned int vector_size           = is_parallel_reduction ? _parallel_vector_size : _serial_vector_size;
-    return std::make_tuple(is_parallel_reduction, vector_size);
-}
+        if (is_quantized)
+        {
+            _tmp_info = TensorInfo(dst_shape, 1, tmp_data_type);
+        }
+    }
+    else
+    {
+        kernel_name += "_non_x";
+        build_opts.add_option("-DSOFTMAX_NON_X");
 
-void ClLogits1DMaxShiftExpSumKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
-{
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+        TensorShape tmp_shape;
 
-    auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
-    auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-    auto max = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0));
-    auto sum = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_1));
+        tmp_shape.set(0, length * vec_size, false);
+        tmp_shape.set(1, dst_shape[0] + (vec_size - vec_size_leftover) % vec_size, false);
 
-    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, max, sum);
+        for (size_t i = 2; i <= static_cast<size_t>(axis); ++i)
+        {
+            tmp_shape.set(i, dst_shape[i - 1], false);
+        }
 
-    // Collapse window in Z dimension
-    Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);
+        for (size_t i = axis + 1; i < dst_shape.num_dimensions(); ++i)
+        {
+            tmp_shape.set(i, dst_shape[i], false);
+        }
 
-    // Reconfigure window in case of parallel reduction
-    ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(src->info()->dimension(0));
-    if (std::get<0>(parallel_reduction_info))
-    {
-        // Launch grid_size parallel work items
-        window_collapsed.set(Window::DimX, Window::Dimension(0, _grid_size, 1));
+        _tmp_info = TensorInfo(tmp_shape, 1, tmp_data_type);
     }
 
-    // Get slices
-    Window slice = window_collapsed.first_slice_window_3D();
-    do
-    {
-        unsigned int idx = 0;
-        // Set inputs
-        add_3D_tensor_argument(idx, src, slice);
-        add_3D_tensor_argument(idx, max, slice);
-        add_3D_tensor_argument(idx, dst, slice);
-        add_3D_tensor_argument(idx, sum, slice);
-        enqueue(queue, *this, slice, lws_hint());
-    } while (window_collapsed.slide_window_slice_3D(slice));
-}
-
-ClLogits1DNormKernel::ClLogits1DNormKernel()
-{
-    _type = CLKernelType::ELEMENTWISE;
-}
-
-void ClLogits1DNormKernel::configure(const CLCompileContext  &compile_context,
-                                     const ITensorInfo       &src,
-                                     const ITensorInfo       &sum,
-                                     ITensorInfo             &dst,
-                                     const SoftmaxKernelInfo &info)
-{
-    auto padding_info = get_padding_info({&src, &dst, &sum});
-
-    // Note: output should always have a scale of 1/256 and offset 0
-    const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(info.input_data_type);
-    const DataType output_data_type = info.input_data_type;
-    const QuantizationInfo allowed_quantization_info =
-        get_softmax_output_quantization_info(info.input_data_type, info.is_log);
-    const UniformQuantizationInfo qinfo = src.quantization_info().uniform();
+    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
 
-    // Output auto initialization if not yet initialized
-    auto_init_if_empty(dst,
-                       src.clone()->set_data_type(output_data_type).set_quantization_info(allowed_quantization_info));
+    // Configure kernel window and kernel arguments.
+    Window win = calculate_max_window(src, Steps(vec_size));
 
-    // Perform validation step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DNorm(src, sum, dst, info));
+    bool has_collapsed = true;
 
-    const auto         is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
-    const int          min_value         = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
-    const unsigned int vector_size       = adjust_vec_size(16, src.dimension(0));
+    win = win.shift_dimensions(1, axis); // Remove this axis from the window/GWS.
+    win = win.collapse_if_possible(win, 2, has_collapsed);
+    ARM_COMPUTE_ERROR_ON(!has_collapsed);
 
-    // Set build options
-    CLBuildOptions build_opts;
-    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(info.input_data_type));
-    build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
-    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
-    build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(src.dimension(0) % vector_size));
-    build_opts.add_option_if(is_data_type_quantized_asymmetric_signed(info.input_data_type), "-DQASYMM8_SIGNED");
-    build_opts.add_options_if(is_quantized_asymmetric,
-                              prepare_quantized_softmax_build_options(qinfo.scale, info.beta).options());
-    build_opts.add_option_if(info.is_log, "-DLOG_SOFTMAX");
-    build_opts.add_option_if(is_quantized_asymmetric, "-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
-    build_opts.add_option_if(is_quantized_asymmetric, "-DBETA=" + float_to_string_with_full_precision(info.beta));
-
-    // Create kernel
-    std::string kernel_name = std::string("softmax_layer_norm") + (is_quantized_asymmetric ? "_quantized" : "");
-    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());
-
-    // Configure window
-    auto win = calculate_max_window(src, Steps(vector_size));
     ICLKernel::configure_internal(win);
 
-    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-}
-
-Status ClLogits1DNormKernel::validate(const ITensorInfo       &src,
-                                      const ITensorInfo       &sum,
-                                      const ITensorInfo       &dst,
-                                      const SoftmaxKernelInfo &info)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DNorm(src, sum, dst, info));
+    _axis = axis;
 
-    return Status{};
+    _config_id = "softmax_" + lower_string(string_from_data_type(data_type));
+    _config_id += "_" + std::to_string(axis);
+    _config_id += "_" + std::to_string(length);
 }
 
-void ClLogits1DNormKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
+void ClSoftmaxKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
 {
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    const auto src =
+        utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+    auto       dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+    ICLTensor *tmp = (_tmp_info.total_size() > 0)
+                         ? utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0))
+                         : nullptr;
 
-    auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
-    auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
-    auto sum = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0));
+    if (!_prepared)
+    {
+        _prepared = true;
+
+        const auto *src_info    = src->info();
+        const auto *dst_info    = dst->info();
+        auto        src_strides = src_info->strides_in_bytes();
+        auto        dst_strides = dst_info->strides_in_bytes();
+
+        const auto src_stride_axis = src_strides[_axis];
+        const auto dst_stride_axis = dst_strides[_axis];
+
+        // This axis has been removed from execution window, hence we remove it from the list of strides
+        // provided to the kernel.
+        // In case axis > 0, src/dst_stride_axis will be provided in dedicated argument independent from global ID.
+        src_strides.remove(_axis);
+        dst_strides.remove(_axis);
+
+        // Argument 0: src_ptr.
+        _kernel.setArg<cl_uint>(1, src_strides[0]);
+        _kernel.setArg<cl_uint>(2, src_strides[1]);
+        _kernel.setArg<cl_uint>(3, src_strides[2]);
+        _kernel.setArg<cl_uint>(4, src_info->offset_first_element_in_bytes());
+
+        // Argument 5: dst_ptr.
+        _kernel.setArg<cl_uint>(6, dst_strides[0]);
+        _kernel.setArg<cl_uint>(7, dst_strides[1]);
+        _kernel.setArg<cl_uint>(8, dst_strides[2]);
+        _kernel.setArg<cl_uint>(9, dst_info->offset_first_element_in_bytes());
+
+        if (tmp != nullptr)
+        {
+            const auto *tmp_info    = tmp->info();
+            const auto &tmp_strides = tmp_info->strides_in_bytes();
+
+            // Argument 10: tmp_ptr.
+            _kernel.setArg<cl_uint>(11, tmp_strides[1]);
+            _kernel.setArg<cl_uint>(12, tmp_strides[2]);
+            _kernel.setArg<cl_uint>(13, tmp_strides[3]);
+            _kernel.setArg<cl_uint>(14, 0);
+        }
 
-    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, sum);
+        if (_axis > 0)
+        {
+            _kernel.setArg<cl_uint>(15, src_stride_axis);
+            _kernel.setArg<cl_uint>(16, dst_stride_axis);
+        }
+    }
 
-    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-    Window slice            = window_collapsed.first_slice_window_3D();
+    _kernel.setArg(0, src->cl_buffer());
+    _kernel.setArg(5, dst->cl_buffer());
 
-    do
+    if (tmp != nullptr)
     {
-        Window sum_slice = slice;
-        sum_slice.set(Window::DimX, Window::Dimension(0, 1, 1));
-
-        unsigned int idx = 0;
-        // Set inputs
-        add_3D_tensor_argument(idx, src, slice);
-        add_3D_tensor_argument(idx, sum, sum_slice);
-        add_3D_tensor_argument(idx, dst, slice);
-        enqueue(queue, *this, slice, lws_hint());
-    } while (window_collapsed.slide_window_slice_3D(slice));
+        _kernel.setArg(10, tmp->cl_buffer());
+    }
+
+    enqueue(queue, *this, window, lws_hint());
 }
+
+const TensorInfo &ClSoftmaxKernel::tmp_tensor_info() const
+{
+    return _tmp_info;
+}
+
 } // namespace kernels
 } // namespace opencl
 } // namespace arm_compute
diff --git a/src/gpu/cl/kernels/ClSoftmaxKernel.h b/src/gpu/cl/kernels/ClSoftmaxKernel.h
index 2dd53da346..130dc7835c 100644
--- a/src/gpu/cl/kernels/ClSoftmaxKernel.h
+++ b/src/gpu/cl/kernels/ClSoftmaxKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,11 +21,13 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_CL_SOFTMAX_KERNEL_H
-#define ARM_COMPUTE_CL_SOFTMAX_KERNEL_H
+#ifndef ACL_SRC_GPU_CL_KERNELS_CLSOFTMAXKERNEL_H
+#define ACL_SRC_GPU_CL_KERNELS_CLSOFTMAXKERNEL_H
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/KernelDescriptors.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Window.h"
 
 #include "src/core/common/Macros.h"
 #include "src/gpu/cl/ClCompileContext.h"
@@ -37,94 +39,47 @@ namespace opencl
 {
 namespace kernels
 {
-/** Interface for max, shifting, exponentiating and summing the logits */
-class ClLogits1DMaxShiftExpSumKernel : public IClKernel
-{
-    /**< Grid size (obtained through auto-tuning) */
-    static const unsigned int _grid_size;
-    /**< Vector size in the serial case (obtained through auto-tuning) */
-    static const unsigned int _serial_vector_size;
-    /**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost) .*/
-    static const unsigned int _parallel_vector_size;
+/** The CL kernel that performs softmax function. */
+class ClSoftmaxKernel : public IClKernel
+{
 public:
-    /** Info for whether a parallel reduction will be run and the vector size of the execution. */
-    using ParallelReductionInfo = std::tuple<bool, unsigned int>;
+    ClSoftmaxKernel();
 
-    ClLogits1DMaxShiftExpSumKernel();
-    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DMaxShiftExpSumKernel);
-    /** Configure the kernel using the given information about tensors
-     *
-     * @param[in]     compile_context The compile context to be used.
-     * @param[in]     src             Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32
-     * @param[in,out] max             Max values tensor. Data types supported: same as @p src
-     * @param[out]    dst             Destination tensor. Data types supported: same as @p src
-     * @param[out]    sum             Sum of 1D logits tensor. Data types supported: same as @p src
-     * @param[in]     info            Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
-     */
-    void configure(const CLCompileContext  &compile_context,
-                   const ITensorInfo       &src,
-                   ITensorInfo             &max,
-                   ITensorInfo             &dst,
-                   ITensorInfo             &sum,
-                   const SoftmaxKernelInfo &info);
-    /** Static function to check if given info will lead to a valid configuration
-     *
-     * Similar to @ref ClLogits1DMaxShiftExpSumKernel::configure()
-     *
-     * @return a status
-     */
-    static Status
-    validate(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum);
-    /** Checks if the given size is eligible for parallel reduction
-     *
-     * @note Serial reduction is launched for width < (_grid_size * _serial_vector_size).
-     * @note Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClSoftmaxKernel);
+
+    /** Check if the kernel arguments are valid.
      *
-     * @param[in] size Size to check
+     * See @ref ClSoftmaxKernel::configure().
      *
-     * @return A two-element tuple where the first element is a boolean specifying if a parallel reduction will be run,
-     *         while the second element is the vector size of the execution.
+     * @return The status.
      */
-    static ParallelReductionInfo is_parallel_reduction(size_t size);
-
-    // Inherited methods overridden:
-    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
-};
-
-/** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
-class ClLogits1DNormKernel : public IClKernel
-{
-public:
-    ClLogits1DNormKernel();
-    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClLogits1DNormKernel);
+    static Status validate(const ITensorInfo &src, const ITensorInfo &dst, const SoftmaxKernelInfo &info);
 
-    /** Set the input and output tensors.
+    /** Configure the kernel.
      *
     * @param[in]  compile_context The compile context to be used.
-     * @param[in]  src             Source tensor. Data types supported: S32/F16/F32. If this kernel is used for log softmax, only F32/F16 is supported.
-     * @param[in]  sum             Sum tensor. Dimensions should be dim(input)-1. Data types supported: same as @p input
-     * @param[out] dst             Destination tensor. Data types supported: QASYMM8/QASYMM8_SIGNED for S32 @p input, or same as @p input
+     * @param[in]  src             Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32 for Softmax and F16/F32 for Log Softmax
+     * @param[out] dst             Destination tensor info. Data types supported: same as @p src
     * @param[in]  info            Contains information consumed by kernels for softmax described in @ref SoftmaxKernelInfo.
     */
    void configure(const CLCompileContext  &compile_context,
                   const ITensorInfo       &src,
-                   const ITensorInfo       &sum,
                   ITensorInfo             &dst,
                   const SoftmaxKernelInfo &info);
-    /** Static function to check if given info will lead to a valid configuration
-     *
-     * Similar to @ref ClLogits1DNormKernel::configure()
-     *
-     * @return a status
-     */
-    static Status
-    validate(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info);
 
    // Inherited methods overridden:
-    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
+    void run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue) override;
+
+    /** Get the tensor info of the temporary tensor. */
+    const TensorInfo &tmp_tensor_info() const;
+
+private:
+    bool       _prepared{false};
+    int32_t    _axis{0};
+    TensorInfo _tmp_info{};
 };
+
 } // namespace kernels
 } // namespace opencl
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_CL_SOFTMAX_KERNEL_H */
+#endif // ACL_SRC_GPU_CL_KERNELS_CLSOFTMAXKERNEL_H
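
As a reference for the fused computation: per row along the reduction axis, the kernel performs the max-shift, exponentiation, accumulation, and normalization that were previously split across ClLogits1DMaxShiftExpSumKernel and ClLogits1DNormKernel. A scalar C++ sketch of that per-row math, covering the BETA scaling and the IS_LOG variant (illustrative only, not code from the patch):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // y_i = exp(beta * (x_i - max)) / sum_j exp(beta * (x_j - max));
    // log-softmax returns the logarithm of that value directly.
    std::vector<float> softmax_row(const std::vector<float> &x, float beta, bool is_log)
    {
        // Shift by the row maximum first so exp() cannot overflow.
        const float max_val = *std::max_element(x.begin(), x.end());

        std::vector<float> y(x.size());
        float sum = 0.f;
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            y[i] = (x[i] - max_val) * beta; // shifted, beta-scaled logit
            sum += std::exp(y[i]);
        }

        for (auto &v : y)
        {
            v = is_log ? v - std::log(sum) : std::exp(v) / sum;
        }
        return y;
    }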