From 201e0fee596dafcf9c869a550fae29779aad2394 Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Wed, 27 Jan 2021 13:14:56 +0000
Subject: Make Softmax kernels on OpenCL stateless

* ClSoftmaxKernel and ClSoftmax are created.
* ClSoftmaxKernel is now stateless and ClSoftmax handles the internal
  tensors required for computation.
* add_const_tensor() is added to TensorPack, not only to provide a
  symmetric interface but also to benefit from implicit conversion.

Implements: COMPMID-3998

Change-Id: I4f823121777be24260fd12b2cd71a6ff718c4eed
Signed-off-by: Sang-Hoon Park
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5087
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 src/runtime/CL/functions/CLSoftmaxLayer.cpp | 172 ++++++++++--------------------
 1 file changed, 57 insertions(+), 115 deletions(-)

diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 93e63dd779..938a10a7c0 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,34 +22,35 @@
  * SOFTWARE.
  */
 #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-
 #include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/ICLKernel.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLSoftmaxLayerKernel.h"
-#include "src/core/helpers/SoftmaxHelpers.h"
+#include "src/core/gpu/cl/kernels/ClSoftmaxKernel.h"
+#include "src/runtime/gpu/cl/operators/ClPermute.h"
+#include "src/runtime/gpu/cl/operators/ClSoftmax.h"
 
 namespace arm_compute
 {
+using OperatorType = opencl::ClSoftmax;
+
+template <bool IS_LOG>
+struct CLSoftmaxLayerGeneric<IS_LOG>::Impl
+{
+    const ICLTensor                                              *src{ nullptr };
+    ICLTensor                                                    *dst{ nullptr };
+    std::unique_ptr<OperatorType>                                 op{ nullptr };
+    MemoryGroup                                                   memory_group{};
+    std::vector<std::pair<TensorType, std::unique_ptr<CLTensor>>> workspace_tensors{};
+};
+
 template <bool IS_LOG>
 CLSoftmaxLayerGeneric<IS_LOG>::CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)),
-      _permute_input(),
-      _permute_output(),
-      _max_shift_exp_sum_kernel(std::make_unique<CLLogits1DMaxShiftExpSumKernel>()),
-      _norm_kernel(std::make_unique<CLLogits1DNormKernel>()),
-      _max(),
-      _sum(),
-      _tmp(),
-      _input_permuted(),
-      _output_permuted(),
-      _needs_permute()
+    : _impl(std::make_unique<Impl>())
 {
+    _impl->memory_group = MemoryGroup(std::move(memory_manager));
 }
 
 template <bool IS_LOG>
@@ -64,118 +65,59 @@ void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor
 template <bool IS_LOG>
 void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, int32_t axis)
 {
-    // Perform validation step
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, axis));
-
-    const size_t actual_axis = static_cast<size_t>(wrap_around(axis, static_cast<int32_t>(input->info()->num_dimensions())));
+    _impl->src = input;
+    _impl->dst = output;
+    _impl->op  = std::make_unique<OperatorType>();
 
-    _needs_permute              = actual_axis != 0;
-    ICLTensor       *tmp_output = output;
-    const ICLTensor *tmp_input  = _needs_permute ? &_input_permuted : input;
-    if(_needs_permute)
-    {
-        _memory_group.manage(&_input_permuted);
-        _memory_group.manage(&_output_permuted);
-        _permute_input.configure(compile_context, input, &_input_permuted, softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
-        tmp_output = &_output_permuted;
-    }
-
-    // Create intermediate tensors
-    DataType   tmp_data_type = is_data_type_quantized_asymmetric(tmp_input->info()->data_type()) ? DataType::S32 : tmp_input->info()->data_type();
-    TensorInfo tensor_info_tmp(tmp_input->info()->clone()->set_data_type(tmp_data_type));
-    _tmp.allocator()->init(tensor_info_tmp);
-    TensorShape max_sum_shape = tmp_input->info()->tensor_shape();
-    max_sum_shape.set(0, 1);
-    _max.allocator()->init(tmp_input->info()->clone()->set_tensor_shape(max_sum_shape));
-    _sum.allocator()->init(tmp_input->info()->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type));
-
-    // Set GPU target to kernels
-    _max_shift_exp_sum_kernel->set_target(CLScheduler::get().target());
-
-    // Manage intermediate buffers
-    _memory_group.manage(&_tmp);
-    _memory_group.manage(&_max);
-    _memory_group.manage(&_sum);
-
-    SoftmaxKernelInfo softmax_info;
-    softmax_info.beta            = beta;
-    softmax_info.is_log          = IS_LOG;
-    softmax_info.input_data_type = tmp_input->info()->data_type();
-
-    // Configure kernels
-    _max_shift_exp_sum_kernel->configure(compile_context, tmp_input, &_max, &_tmp, &_sum, softmax_info);
-    _norm_kernel->configure(compile_context, &_tmp, &_sum, tmp_output, softmax_info);
-
-    // Allocate intermediate buffers
-    _tmp.allocator()->allocate();
-    _max.allocator()->allocate();
-    _sum.allocator()->allocate();
-
-    if(_needs_permute)
-    {
-        _permute_output.configure(compile_context, &_output_permuted, output, softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
-        _input_permuted.allocator()->allocate();
-        _output_permuted.allocator()->allocate();
-    }
+    SoftmaxKernelInfo softmax_info{ beta, IS_LOG, input->info()->data_type(), axis };
+    _impl->op->configure(compile_context, *input->info(), *output->info(), softmax_info);
 }
 
 template <bool IS_LOG>
 Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
-    ARM_COMPUTE_UNUSED(beta);
-    ARM_COMPUTE_RETURN_ERROR_ON(axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= axis);
-
-    const size_t actual_axis   = static_cast<size_t>(wrap_around(axis, static_cast<int32_t>(input->num_dimensions())));
-    const bool   needs_permute = actual_axis != 0;
-    if(needs_permute)
+    SoftmaxKernelInfo softmax_info{ beta, IS_LOG, input->data_type(), axis };
+    return OperatorType::validate(*input, *output, softmax_info);
+}
+
+template <bool IS_LOG>
+void CLSoftmaxLayerGeneric<IS_LOG>::allocate_workspace()
+{
+    const auto memory_requirements = _impl->op->workspace();
+    std::for_each(memory_requirements.begin(), memory_requirements.end(), [this](const experimental::MemoryInfo & memory_info)
     {
-        const PermutationVector permutation_vector = softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis);
-        const TensorShape       permuted_shape     = misc::shape_calculator::compute_permutation_output_shape(*input, permutation_vector);
-        TensorInfo              input_permuted(input->clone()->set_tensor_shape(permuted_shape));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &input_permuted, permutation_vector));
-        TensorInfo output_permuted(output->clone()->set_tensor_shape(permuted_shape));
-        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&output_permuted, output, permutation_vector));
-    }
-
-    // Create intermediate tensor info
-    DataType   tmp_data_type = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
-    TensorInfo tensor_info_tmp(input->clone()->set_data_type(tmp_data_type).set_is_resizable(true));
-
-    TensorShape max_sum_shape = input->tensor_shape();
-    max_sum_shape.set(0, 1);
-    TensorInfo tensor_info_max(input->clone()->set_tensor_shape(max_sum_shape).set_is_resizable(true));
-    TensorInfo tensor_info_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(QuantizationInfo()).set_is_resizable(true));
-
-    SoftmaxKernelInfo softmax_info;
-    softmax_info.beta            = beta;
-    softmax_info.is_log          = IS_LOG;
-    softmax_info.input_data_type = input->data_type();
-
-    ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DMaxShiftExpSumKernel::validate(input, &tensor_info_max, &tensor_info_tmp, &tensor_info_sum));
-    ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DNormKernel::validate(&tensor_info_tmp, &tensor_info_sum, output, softmax_info));
-
-    return Status{};
+        auto tensor_info = TensorInfo{ TensorShape(memory_info.size), 1, DataType::U8 };
+        _impl->workspace_tensors.emplace_back(memory_info.type, std::make_unique<CLTensor>());
+        auto tensor = _impl->workspace_tensors.back().second.get();
+        ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+        _impl->memory_group.manage(tensor);
+        tensor->allocator()->init(tensor_info);
+        tensor->allocator()->allocate();
+    });
 }
 
 template <bool IS_LOG>
 void CLSoftmaxLayerGeneric<IS_LOG>::run()
 {
-    MemoryGroupResourceScope scope_mg(_memory_group);
+    allocate_workspace();
 
-    if(_needs_permute)
-    {
-        _permute_input.run();
-    }
+    // Acquire all the temporaries
+    MemoryGroupResourceScope scope_mg(_impl->memory_group);
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
 
-    CLScheduler::get().enqueue(*_max_shift_exp_sum_kernel, false);
-    CLScheduler::get().enqueue(*_norm_kernel, !_needs_permute);
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_DST, _impl->dst);
 
-    if(_needs_permute)
+    std::for_each(_impl->workspace_tensors.begin(), _impl->workspace_tensors.end(), [&pack](std::pair<TensorType, std::unique_ptr<CLTensor>> &wt)
     {
-        _permute_output.run();
-    }
+        auto tensor = wt.second.get();
+        ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+        pack.add_tensor(wt.first, tensor);
+    });
+
+    _impl->op->run(pack);
 }
 
 template class CLSoftmaxLayerGeneric<false>;
-- cgit v1.2.1
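The call sequence this patch introduces can be exercised directly: the stateless operator is configured once against ITensorInfo metadata only, and the actual tensors are handed over on every run() through an ITensorPack. The sketch below illustrates that flow; it is not part of the patch. The function name run_softmax_stateless and the tensors src/dst are placeholders, the ClSoftmax header is internal to the library, and a real caller must additionally bind the workspace tensors reported by op.workspace(), as allocate_workspace() does in the diff above.

    // Minimal sketch (illustrative only): driving the stateless
    // opencl::ClSoftmax operator introduced by this patch.
    #include "arm_compute/core/CL/CLKernelLibrary.h"
    #include "arm_compute/core/CL/ICLTensor.h"
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/KernelDescriptors.h"
    #include "src/runtime/gpu/cl/operators/ClSoftmax.h"

    using namespace arm_compute;

    // Assumes `src` and `dst` are already-allocated CL tensors of
    // softmax-compatible shapes and data types.
    void run_softmax_stateless(ICLTensor *src, ICLTensor *dst)
    {
        // configure() consumes metadata only; the operator keeps no
        // pointers to the tensors themselves.
        SoftmaxKernelInfo info{ 1.0f /* beta */, false /* is_log */, src->info()->data_type(), 0 /* axis */ };
        opencl::ClSoftmax op;
        op.configure(CLKernelLibrary::get().get_compile_context(), *src->info(), *dst->info(), info);

        // At run time the tensors travel in a pack. add_const_tensor(),
        // added by this patch, keeps the interface symmetric for const
        // inputs and benefits from the ICLTensor* -> const ITensor*
        // implicit conversion.
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC, src);
        pack.add_tensor(TensorType::ACL_DST, dst);

        // Workspace tensors from op.workspace() would also be added to
        // the pack here (omitted; see allocate_workspace() above).
        op.run(pack);
    }

Keeping the tensors out of the operator is what allows a single ClSoftmax instance to be shared across workloads, while the CLSoftmaxLayerGeneric wrapper above preserves the old stateful function-level interface on top of it.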