From 3ecf9fefa6f6299a0736599f150d4791cc8345d9 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Wed, 28 Apr 2021 16:11:51 +0100
Subject: Remove OpenCL padding: CLReductionOperationKernel

Change the parallel implementation across the X axis: now every thread computes one row.

Add missing test for MEAN_SUM.

Make reduction on any axis != 0 work with num_channels > 1.

Resolve COMPMID-3917

Signed-off-by: Giorgio Arena
Change-Id: Ib0f99540104e3c253bcd1ea637833db533f5e76e
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5522
Comments-Addressed: Arm Jenkins
Reviewed-by: Manuel Bottini
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 src/runtime/CL/functions/CLReductionOperation.cpp | 223 ++--------------------
 1 file changed, 14 insertions(+), 209 deletions(-)

diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index f40d945944..61859f8de8 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,7 +30,6 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLReductionOperationKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/runtime/Utils.h"
@@ -38,8 +37,7 @@
 namespace arm_compute
 {
 CLReductionOperation::CLReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _results_vector(), _reduction_kernels_vector(), _border_handlers_vector(), _reshape(), _num_of_stages(), _reduction_axis(), _is_serial(),
-      _is_reshape_required(false)
+    : _memory_group(std::move(memory_manager)), _unreshaped_output(), _reduction_kernel(), _reshape(), _reduction_axis(), _is_reshape_required(false)
 {
 }
 
@@ -51,9 +49,7 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
 
-    const unsigned int num_of_stages       = utils::calculate_number_of_stages_only_x_axis(input->dimension(0), axis);
-    const bool         is_serial           = needs_serialized_reduction(op, input->data_type(), axis);
-    const bool         is_reshape_required = !keep_dims;
+    const bool is_reshape_required = !keep_dims;
 
     if(is_reshape_required && output->total_size() != 0)
     {
@@ -65,7 +61,6 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
 
     TensorInfo output_before_reshape;
     const auto input_shape        = input->tensor_shape();
-    const auto input_data_type    = input->data_type();
     const auto input_num_channles = input->num_channels();
     const auto input_qinfo        = input->quantization_info();
     const auto output_data_type   = output->data_type();
@@ -83,73 +78,7 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
         output_internal = &output_before_reshape;
     }
 
-    if(is_serial)
-    {
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output_internal, axis, op));
-    }
-    else
-    {
-        // Create temporary tensor infos
-        std::vector<TensorInfo> sums_vector(num_of_stages - 1);
-
-        // Create intermediate tensor info
-        TensorShape shape{ input_shape };
-
-        shape.set(0, ceil(shape.x() / 128.f));
-
-        for(unsigned int i = 0; i < num_of_stages - 1; i++)
-        {
-            initialize_tensorinfo(sums_vector[i], shape, input_data_type, input_num_channles, input_qinfo);
-        }
-
-        ReductionOperation first_kernel_op;
-        ReductionOperation intermediate_kernel_op;
-        ReductionOperation last_kernel_op;
-        switch(op)
-        {
-            case ReductionOperation::SUM:
-            case ReductionOperation::MEAN_SUM:
-                first_kernel_op        = ReductionOperation::SUM;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = op;
-                break;
-            case ReductionOperation::SUM_SQUARE:
-                first_kernel_op        = ReductionOperation::SUM_SQUARE;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = ReductionOperation::SUM;
-                break;
-            case ReductionOperation::PROD:
-                first_kernel_op        = ReductionOperation::PROD;
-                intermediate_kernel_op = ReductionOperation::PROD;
-                last_kernel_op         = ReductionOperation::PROD;
-                break;
-            case ReductionOperation::MIN:
-                first_kernel_op        = ReductionOperation::MIN;
-                intermediate_kernel_op = ReductionOperation::MIN;
-                last_kernel_op         = ReductionOperation::MIN;
-                break;
-            case ReductionOperation::MAX:
-                first_kernel_op        = ReductionOperation::MAX;
-                intermediate_kernel_op = ReductionOperation::MAX;
-                last_kernel_op         = ReductionOperation::MAX;
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Not supported");
-        }
-
-        // Validate ReductionOperation only on first kernel
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, &sums_vector[0], axis, first_kernel_op));
-
-        // Validate ReductionOperation on intermediate stages
-        for(unsigned int i = 1; i < num_of_stages - 1; ++i)
-        {
-            ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[i - 1], &sums_vector[i], axis, intermediate_kernel_op));
-        }
-
-        // Validate ReductionOperation on the last stage
-        const unsigned int last_stage = num_of_stages - 1;
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(&sums_vector[last_stage - 1], output_internal, axis, last_kernel_op, input->dimension(0)));
-    }
+    ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output_internal, axis, op));
 
     if(is_reshape_required)
     {
@@ -161,33 +90,15 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
 ICLTensor *CLReductionOperation::configure_intermediate_result_vector(ICLTensor *input, ICLTensor *output)
 {
-    if(!_is_reshape_required && _is_serial)
-    {
-        return output;
-    }
-
-    auto intermediate_result_vector_size = _is_serial ? 1 : _num_of_stages;
-
     if(!_is_reshape_required)
     {
-        --intermediate_result_vector_size;
+        return output;
     }
 
-    _results_vector.resize(intermediate_result_vector_size);
     auto shape = input->info()->tensor_shape();
-
-    shape.set(_reduction_axis, _is_serial ? 1 : ceil(shape.x() / 128.f));
-
-    for(auto &v : _results_vector)
-    {
-        if(&v == &_results_vector.back() && _is_reshape_required)
-        {
-            shape.set(_reduction_axis, 1);
-        }
-        v.allocator()->init(input->info()->clone()->set_tensor_shape(shape));
-    }
-
-    return _is_reshape_required ? &_results_vector.back() : output;
+    shape.set(_reduction_axis, 1);
+    _unreshaped_output.allocator()->init(input->info()->clone()->set_tensor_shape(shape));
+    return &_unreshaped_output;
 }
 
 void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
 {
@@ -198,9 +109,7 @@ void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsign
 void CLReductionOperation::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    _num_of_stages       = utils::calculate_number_of_stages_only_x_axis(input->info()->dimension(0), axis);
     _reduction_axis      = axis;
-    _is_serial           = needs_serialized_reduction(op, input->info()->data_type(), axis);
     _is_reshape_required = !keep_dims;
 
     auto *output_internal = configure_intermediate_result_vector(input, output);
@@ -210,110 +119,17 @@ void CLReductionOperation::configure(const CLCompileContext &compile_context, IC
         const TensorShape output_shape     = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
         const auto        output_data_type = input->info()->data_type();
         auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
-    }
-
-    // Configure reduction operation kernels
-    _reduction_kernels_vector.reserve(_num_of_stages);
 
-    // Create temporary tensors
-    if(_is_serial)
-    {
-        if(_is_reshape_required)
-        {
-            _memory_group.manage(&_results_vector.back());
-        }
-        _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-        _reduction_kernels_vector[0]->configure(compile_context, input, output_internal, axis, op, 0);
+        _memory_group.manage(&_unreshaped_output);
     }
-    else
-    {
-        _border_handlers_vector.reserve(_num_of_stages);
-        _memory_group.manage(&_results_vector[0]);
-
-        ReductionOperation first_kernel_op;
-        ReductionOperation intermediate_kernel_op;
-        ReductionOperation last_kernel_op;
-        PixelValue         pixelValue;
-        switch(op)
-        {
-            case ReductionOperation::SUM:
-            case ReductionOperation::MEAN_SUM:
-                first_kernel_op        = ReductionOperation::SUM;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = op;
-                pixelValue             = PixelValue();
-                break;
-            case ReductionOperation::SUM_SQUARE:
-                first_kernel_op        = ReductionOperation::SUM_SQUARE;
-                intermediate_kernel_op = ReductionOperation::SUM;
-                last_kernel_op         = ReductionOperation::SUM;
-                pixelValue             = PixelValue();
-                break;
-            case ReductionOperation::PROD:
-                first_kernel_op        = ReductionOperation::PROD;
-                intermediate_kernel_op = ReductionOperation::PROD;
-                last_kernel_op         = ReductionOperation::PROD;
-                pixelValue             = PixelValue(1, input->info()->data_type());
-                break;
-            case ReductionOperation::MIN:
-                first_kernel_op        = ReductionOperation::MIN;
-                intermediate_kernel_op = ReductionOperation::MIN;
-                last_kernel_op         = ReductionOperation::MIN;
-                pixelValue             = std::get<1>(get_min_max(input->info()->data_type()));
-                break;
-            case ReductionOperation::MAX:
-                first_kernel_op        = ReductionOperation::MAX;
-                intermediate_kernel_op = ReductionOperation::MAX;
-                last_kernel_op         = ReductionOperation::MAX;
-                pixelValue             = std::get<0>(get_min_max(input->info()->data_type()));
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Not supported");
-        }
-
-        _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-        _reduction_kernels_vector[0]->configure(compile_context, input, &_results_vector[0], axis, first_kernel_op);
-
-        _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
-        _border_handlers_vector[0]->configure(compile_context, input, _reduction_kernels_vector[0]->border_size(), BorderMode::CONSTANT, pixelValue);
-
-        // Apply ReductionOperation on intermediate stages
-        for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
-        {
-            _memory_group.manage(&_results_vector[i]);
-
-            _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-            _reduction_kernels_vector[i]->configure(compile_context, &_results_vector[i - 1], &_results_vector[i], axis, intermediate_kernel_op);
-
-            _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
-            _border_handlers_vector[i]->configure(compile_context, &_results_vector[i - 1], _reduction_kernels_vector[i]->border_size(), BorderMode::CONSTANT, pixelValue);
-            _results_vector[i - 1].allocator()->allocate();
-        }
-
-        // Apply ReductionOperation on the last stage
-        const unsigned int last_stage  = _num_of_stages - 1;
-        const unsigned int input_width = input->info()->dimension(0);
-
-        if(_is_reshape_required)
-        {
-            _memory_group.manage(&_results_vector.back());
-        }
-
-        _reduction_kernels_vector.emplace_back(std::make_unique<CLReductionOperationKernel>());
-        _reduction_kernels_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], output_internal, axis, last_kernel_op, input_width);
-
-        _border_handlers_vector.emplace_back(std::make_unique<CLFillBorderKernel>());
-        _border_handlers_vector[last_stage]->configure(compile_context, &_results_vector[last_stage - 1], _reduction_kernels_vector[last_stage]->border_size(), BorderMode::CONSTANT, pixelValue);
-
-        _results_vector[last_stage - 1].allocator()->allocate();
-    }
+    _reduction_kernel = std::make_unique<CLReductionOperationKernel>();
+    _reduction_kernel->configure(compile_context, input, output_internal, axis, op);
 
     if(_is_reshape_required)
     {
-        _reshape.configure(compile_context, &_results_vector.back(), output);
-        _results_vector.back().allocator()->allocate();
+        _reshape.configure(compile_context, &_unreshaped_output, output);
+        _unreshaped_output.allocator()->allocate();
     }
 }
 
@@ -321,18 +137,7 @@ void CLReductionOperation::run()
 {
     MemoryGroupResourceScope scope_mg(_memory_group);
 
-    if(_is_serial)
-    {
-        CLScheduler::get().enqueue(*_reduction_kernels_vector[0], false);
-    }
-    else
-    {
-        for(unsigned int i = 0; i < _num_of_stages; ++i)
-        {
-            CLScheduler::get().enqueue(*_border_handlers_vector[i], false);
-            CLScheduler::get().enqueue(*_reduction_kernels_vector[i], false);
-        }
-    }
+    CLScheduler::get().enqueue(*_reduction_kernel, false);
 
     if(_is_reshape_required)
     {
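Usage note (not part of the patch): after this change CLReductionOperation drives a single CLReductionOperationKernel, with an optional reshape only when keep_dims is false and no border handling at all. The sketch below shows one plausible way the simplified function is driven from application code; the tensor shapes, data type and explicit output initialisation are illustrative assumptions, not something defined by this commit.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLReductionOperation.h"

using namespace arm_compute;

int main()
{
    // Set up the default OpenCL context and command queue.
    CLScheduler::get().default_init();

    // Example 2D input: 128 elements per row, 32 rows (shapes chosen for illustration).
    CLTensor input;
    CLTensor output;
    input.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
    // keep_dims = true, so the reduced axis is kept with size 1: the output is 1 x 32.
    output.allocator()->init(TensorInfo(TensorShape(1U, 32U), 1, DataType::F32));

    // Reduce along axis 0 (X). With keep_dims = true no reshape is required,
    // so the single reduction kernel writes straight into 'output'.
    CLReductionOperation reduction;
    reduction.configure(&input, &output, /* axis */ 0, ReductionOperation::SUM, /* keep_dims */ true);

    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... map 'input', fill it with data, unmap ...

    reduction.run();
    CLScheduler::get().sync();
    return 0;
}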