From 7e9391bb14d219cda310bff355669b5964b1f576 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Fri, 5 Oct 2018 14:49:28 +0100
Subject: COMPMID-1574 Implement ReduceMean in OpenCL

Change-Id: Id331199f569f52a37280a9ada5bf84694580b93c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/152843
Tested-by: bsgcomp
Reviewed-by: Michele Di Giorgio
---
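Notes (not part of the commit):

CLReduceMean chains one CLReductionOperation per requested axis, each using
ReductionOperation::MEAN_SUM, and appends a reshape when keep_dims is false to
drop the reduced dimensions. A minimal usage sketch follows; the tensor shape
and reduction axes are arbitrary examples, and it assumes configure()
auto-initialises an empty output info, as the auto_init_if_empty/validate code
in the patch suggests:

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLReduceMean.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Hypothetical 3D input [W=256, H=4, C=2]; take the mean over axes 0 and 1.
        CLTensor input{};
        CLTensor output{};
        input.allocator()->init(TensorInfo(TensorShape(256U, 4U, 2U), 1, DataType::F32));

        const Coordinates reduction_axis(0, 1); // x and y; validate() requires axes <= 3
        const bool        keep_dims = true;     // output keeps rank 3: [1, 1, 2]

        CLReduceMean reduce_mean{};
        reduce_mean.configure(&input, reduction_axis, keep_dims, &output);

        input.allocator()->allocate();
        output.allocator()->allocate();

        // ... map the input and fill it here ...

        reduce_mean.run();          // one MEAN_SUM reduction per axis; no reshape (keep_dims)
        CLScheduler::get().sync();
        return 0;
    }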
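On the CLReductionOperation side, any axis other than x, and the x-axis for
quantized (QASYMM8) input, now runs as a single stage; the x-axis float path
keeps the staged scheme in which each work-group reduces 128 elements
(16 elements per thread x 8 threads per work-group), so the first stage of a
row of width W leaves ceil(W / 128) partial results for the later stages to
collapse. A quick check of that arithmetic, with an arbitrary example width
(not taken from the patch):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const unsigned int width     = 2048;                     // hypothetical x-dimension
        const unsigned int num_of_wg = std::ceil(width / 128.f); // 2048 / 128 = 16 work-groups
        std::printf("first stage emits %u partial results\n", num_of_wg);
        return 0;
    }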
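Also worth noting is the first/last kernel split in the staged configure()
path: SUM and MEAN_SUM both accumulate plain SUMs through the first and
intermediate stages, and only the last kernel applies the requested operation.
That last kernel alone is passed input_width, presumably so MEAN_SUM divides
by the original row width rather than by the width of an intermediate tensor;
likewise SUM_SQUARE squares only in the first stage, since squaring the
already-squared partial sums again would be wrong.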
 src/runtime/CL/functions/CLReduceMean.cpp         | 122 +++++++++++++++++
 src/runtime/CL/functions/CLReductionOperation.cpp | 156 ++++++++++++++--------
 2 files changed, 223 insertions(+), 55 deletions(-)
 create mode 100644 src/runtime/CL/functions/CLReduceMean.cpp

(limited to 'src/runtime')

diff --git a/src/runtime/CL/functions/CLReduceMean.cpp b/src/runtime/CL/functions/CLReduceMean.cpp
new file mode 100644
index 0000000000..6e55b81d1c
--- /dev/null
+++ b/src/runtime/CL/functions/CLReduceMean.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLReduceMean.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/helpers/tensor_transform.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+CLReduceMean::CLReduceMean(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)), _reduction_kernels(), _reduced_outs(), _reshape(), _reduction_ops(), _keep_dims()
+{
+}
+void CLReduceMean::configure(ICLTensor *input, const Coordinates &reduction_axis, bool keep_dims, ICLTensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+
+    _reduction_ops     = reduction_axis.num_dimensions();
+    _reduction_kernels = arm_compute::support::cpp14::make_unique<CLReductionOperation[]>(_reduction_ops);
+    _reduced_outs      = arm_compute::support::cpp14::make_unique<CLTensor[]>(_reduction_ops - (keep_dims ? 1 : 0));
+    _keep_dims         = keep_dims;
+
+    // Perform reduction for every axis
+    for(unsigned int i = 0; i < _reduction_ops; ++i)
+    {
+        TensorShape out_shape = i == 0 ? input->info()->tensor_shape() : (_reduced_outs.get() + i - 1)->info()->tensor_shape();
+        out_shape.set(reduction_axis[i], 1);
+        auto in = (i == 0) ? input : (_reduced_outs.get() + i - 1);
+
+        if(i == _reduction_ops - 1 && keep_dims)
+        {
+            _reduction_kernels[i].configure(in, output, reduction_axis[i], ReductionOperation::MEAN_SUM);
+        }
+        else
+        {
+            _reduced_outs[i].allocator()->init(TensorInfo(out_shape, input->info()->num_channels(), input->info()->data_type(), input->info()->quantization_info()));
+            _memory_group.manage(_reduced_outs.get() + i);
+            _reduction_kernels[i].configure(in, _reduced_outs.get() + i, reduction_axis[i], ReductionOperation::MEAN_SUM);
+        }
+    }
+
+    // Allocate intermediate tensors
+    for(unsigned int i = 0; i < _reduction_ops - (keep_dims ? 1 : 0); ++i)
+    {
+        _reduced_outs[i].allocator()->allocate();
+    }
+
+    // Configure reshape layer if we want to drop the dimensions
+    if(!keep_dims)
+    {
+        TensorShape out_shape = input->info()->tensor_shape();
+        for(unsigned int i = 0; i < _reduction_ops; ++i)
+        {
+            out_shape.remove_dimension(reduction_axis[i]);
+        }
+        auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(out_shape));
+        _reshape.configure(_reduced_outs.get() + _reduction_ops - 1, output);
+    }
+}
+
+Status CLReduceMean::validate(const ITensorInfo *input, const Coordinates &reduction_axis, bool keep_dims, const ITensorInfo *output)
+{
+    ARM_COMPUTE_UNUSED(keep_dims);
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
+    ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis.num_dimensions() > input->num_dimensions());
+
+    for(unsigned int i = 0; i < reduction_axis.num_dimensions(); ++i)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON(reduction_axis[i] > 3);
+        if(output->total_size() > 0)
+        {
+            ARM_COMPUTE_RETURN_ERROR_ON(output->dimension(reduction_axis[i]) != 1);
+            ARM_COMPUTE_RETURN_ERROR_ON(static_cast<unsigned int>(reduction_axis[i]) > input->num_dimensions() - 1);
+        }
+
+        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output, reduction_axis[i], ReductionOperation::MEAN_SUM, 0));
+    }
+
+    return Status{};
+}
+
+void CLReduceMean::run()
+{
+    _memory_group.acquire();
+
+    for(unsigned int i = 0; i < _reduction_ops; ++i)
+    {
+        _reduction_kernels[i].run();
+    }
+
+    if(!_keep_dims)
+    {
+        _reshape.run();
+    }
+    _memory_group.release();
+}
+} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index 2a171c3969..4b65c47392 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -37,8 +37,13 @@ using namespace arm_compute;
 
 namespace
 {
-unsigned int calculate_number_of_stages(const ITensorInfo *input)
+unsigned int calculate_number_of_stages(const ITensorInfo *input, unsigned int axis)
 {
+    // We need only 1 stage for all axis except x-axis and x-axis for QASYMM8.
+    if(axis != 0 || (axis == 0 && is_data_type_quantized(input->data_type())))
+    {
+        return 1;
+    }
     // Calculate number of WGs. 16 elements per thread, 8 threads per WG
     const unsigned int num_of_wg = ceil(input->dimension(0) / 128.f);
 
@@ -51,91 +56,132 @@ unsigned int calculate_number_of_stages(const ITensorInfo *input)
 } // namespace
 
 CLReductionOperation::CLReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _sums_vector(), _reduction_kernels_vector(), _border_handlers_vector(), _num_of_stages()
+    : _memory_group(std::move(memory_manager)), _sums_vector(), _reduction_kernels_vector(), _border_handlers_vector(), _num_of_stages(), _reduction_axis(), _is_quantized()
 {
 }
 
 Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
 {
-    const unsigned int num_of_stages = calculate_number_of_stages(input);
-
-    // Create temporary tensor infos
-    auto sums_vector = arm_compute::support::cpp14::make_unique<TensorInfo[]>(num_of_stages - 1);
-
-    // Create intermediate tensor info
-    TensorShape shape{ input->tensor_shape() };
+    const unsigned int num_of_stages = calculate_number_of_stages(input, axis);
 
-    for(unsigned int i = 0; i < num_of_stages - 1; i++)
+    if(axis == 0 && !is_data_type_quantized(input->data_type()))
     {
-        shape.set(0, ceil(shape.x() / 128.f));
-        sums_vector[i].set_data_type(input->data_type());
-        sums_vector[i].set_tensor_shape(shape);
-        sums_vector[i].set_num_channels(input->num_channels());
+        // Create temporary tensor infos
+        auto sums_vector = arm_compute::support::cpp14::make_unique<TensorInfo[]>(num_of_stages - 1);
+
+        // Create intermediate tensor info
+        TensorShape shape{ input->tensor_shape() };
+
+        for(unsigned int i = 0; i < num_of_stages - 1; i++)
+        {
+            shape.set(0, ceil(shape.x() / 128.f));
+            sums_vector[i].set_data_type(input->data_type());
+            sums_vector[i].set_tensor_shape(shape);
+            sums_vector[i].set_num_channels(input->num_channels());
+        }
+
+        // Validate ReductionOperation only on first kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, sums_vector.get(), axis, op));
+
+        // Validate ReductionOperation on intermediate stages
+        for(unsigned int i = 1; i < num_of_stages - 1; ++i)
+        {
+            ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + i - 1, sums_vector.get() + i, axis, op));
+        }
+
+        // Validate ReductionOperation on the last stage
+        const unsigned int last_stage = num_of_stages - 1;
+        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + last_stage - 1, output, axis, op));
     }
-
-    // Validate ReductionOperation only on first kernel
-    ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, sums_vector.get(), axis, op));
-
-    // Validate ReductionOperation on intermediate stages
-    for(unsigned int i = 1; i < num_of_stages - 1; ++i)
+    else
     {
-        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + i - 1, sums_vector.get() + i, axis, op));
+        ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(input, output, axis, op));
     }
 
-    // Validate ReductionOperation on the last stage
-    const unsigned int last_stage = num_of_stages - 1;
-    ARM_COMPUTE_RETURN_ON_ERROR(CLReductionOperationKernel::validate(sums_vector.get() + last_stage - 1, output, axis, op));
-
     return Status{};
 }
 
 void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op)
 {
-    _num_of_stages = calculate_number_of_stages(input->info());
-
-    // Create temporary tensors
-    _sums_vector = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_of_stages - 1);
+    _num_of_stages  = calculate_number_of_stages(input->info(), axis);
+    _reduction_axis = axis;
+    _is_quantized   = is_data_type_quantized(input->info()->data_type());
 
     // Configure reduction operation kernels
     _reduction_kernels_vector = arm_compute::support::cpp14::make_unique<CLReductionOperationKernel[]>(_num_of_stages);
-    _border_handlers_vector   = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_of_stages);
-    TensorShape shape{ input->info()->tensor_shape() };
-    for(unsigned int i = 0; i < _num_of_stages - 1; i++)
+
+    // Create temporary tensors
+    if(axis == 0 && !_is_quantized)
     {
-        shape.set(0, ceil(shape.x() / 128.f));
-        _sums_vector[i].allocator()->init(TensorInfo(shape, input->info()->num_channels(), input->info()->data_type()));
+        _border_handlers_vector = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(_num_of_stages);
+        _sums_vector            = arm_compute::support::cpp14::make_unique<CLTensor[]>(_num_of_stages - 1);
+        TensorShape shape{ input->info()->tensor_shape() };
+        for(unsigned int i = 0; i < _num_of_stages - 1; i++)
+        {
+            shape.set(0, ceil(shape.x() / 128.f));
+            _sums_vector[i].allocator()->init(TensorInfo(shape, input->info()->num_channels(), input->info()->data_type()));
+        }
+
+        // Apply ReductionOperation only on first kernel
+        _memory_group.manage(_sums_vector.get());
+
+        ReductionOperation first_kernel_op;
+        ReductionOperation last_kernel_op;
+        switch(op)
+        {
+            case ReductionOperation::SUM:
+            case ReductionOperation::MEAN_SUM:
+                first_kernel_op = ReductionOperation::SUM;
+                last_kernel_op  = op;
+                break;
+            case ReductionOperation::SUM_SQUARE:
+                first_kernel_op = ReductionOperation::SUM_SQUARE;
+                last_kernel_op  = ReductionOperation::SUM;
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Not supported");
+        }
+
+        _reduction_kernels_vector[0].configure(input, _sums_vector.get(), axis, first_kernel_op);
+        _border_handlers_vector[0].configure(input, _reduction_kernels_vector[0].border_size(), BorderMode::CONSTANT, PixelValue(0));
+
+        // Apply ReductionOperation on intermediate stages
+        for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
+        {
+            _memory_group.manage(_sums_vector.get() + i);
+            _reduction_kernels_vector[i].configure(_sums_vector.get() + i - 1, _sums_vector.get() + i, axis, ReductionOperation::SUM);
+            _border_handlers_vector[i].configure(_sums_vector.get() + i - 1, _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, PixelValue(0));
+            _sums_vector[i - 1].allocator()->allocate();
+        }
+
+        // Apply ReductionOperation on the last stage
+        const unsigned int last_stage  = _num_of_stages - 1;
+        const unsigned int input_width = input->info()->dimension(0);
+        _reduction_kernels_vector[last_stage].configure(_sums_vector.get() + last_stage - 1, output, axis, last_kernel_op, input_width);
+        _border_handlers_vector[last_stage].configure(_sums_vector.get() + last_stage - 1, _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, PixelValue(0));
+        _sums_vector[last_stage - 1].allocator()->allocate();
     }
-
-    // Apply ReductionOperation only on first kernel
-    _memory_group.manage(_sums_vector.get());
-    _reduction_kernels_vector[0].configure(input, _sums_vector.get(), axis, op);
-    _border_handlers_vector[0].configure(input, _reduction_kernels_vector[0].border_size(), BorderMode::CONSTANT, PixelValue(0));
-
-    // Apply ReductionOperation on intermediate stages
-    for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
+    else
     {
-        _memory_group.manage(_sums_vector.get() + i);
-        _reduction_kernels_vector[i].configure(_sums_vector.get() + i - 1, _sums_vector.get() + i, axis, ReductionOperation::SUM);
-        _border_handlers_vector[i].configure(_sums_vector.get() + i - 1, _reduction_kernels_vector[i].border_size(), BorderMode::CONSTANT, PixelValue(0));
-        _sums_vector[i - 1].allocator()->allocate();
+        _reduction_kernels_vector[0].configure(input, output, axis, op, 0);
     }
-
-    // Apply ReductionOperation on the last stage
-    const unsigned int last_stage = _num_of_stages - 1;
-    _reduction_kernels_vector[last_stage].configure(_sums_vector.get() + last_stage - 1, output, axis, ReductionOperation::SUM);
-    _border_handlers_vector[last_stage].configure(_sums_vector.get() + last_stage - 1, _reduction_kernels_vector[last_stage].border_size(), BorderMode::CONSTANT, PixelValue(0));
-    _sums_vector[last_stage - 1].allocator()->allocate();
 }
 
 void CLReductionOperation::run()
 {
     _memory_group.acquire();
 
-    for(unsigned int i = 0; i < _num_of_stages; ++i)
+    if(_reduction_axis == 0 && !_is_quantized)
+    {
+        for(unsigned int i = 0; i < _num_of_stages; ++i)
+        {
+            CLScheduler::get().enqueue(_border_handlers_vector[i], false);
+            CLScheduler::get().enqueue(_reduction_kernels_vector[i], false);
+        }
+    }
+    else
     {
-        CLScheduler::get().enqueue(_border_handlers_vector[i], false);
-        CLScheduler::get().enqueue(_reduction_kernels_vector[i], false);
+        CLScheduler::get().enqueue(_reduction_kernels_vector[0], false);
     }
 
     _memory_group.release();
 }
--
cgit v1.2.1