From 35ceeb2199c569810a1524a0a21c2df2a3f5f29e Mon Sep 17 00:00:00 2001
From: Diego Lopez Recas
Date: Mon, 4 Dec 2017 18:56:10 +0000
Subject: IVGCVSW-798 Add Softmax NEON support for QASYMM8

Change-Id: I4f2cca52caf210fdb7d6bb7e9436ac51cb5088b4
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112398
Reviewed-by: Anthony Barbier
Tested-by: Jenkins
---
 src/runtime/NEON/functions/NESoftmaxLayer.cpp | 44 ++++++++++-----------------
 1 file changed, 16 insertions(+), 28 deletions(-)

diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 8e6773c5b1..4fb83007c5 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,7 +32,7 @@
 using namespace arm_compute;
 
 NESoftmaxLayer::NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _fill_border_kernel(), _max(), _sum(), _tmp()
+    : _memory_group(std::move(memory_manager)), _max_kernel(), _softmax_kernel(), _fill_border_kernel(), _max(), _tmp()
 {
 }
 
@@ -40,31 +40,22 @@ void NESoftmaxLayer::configure(ITensor *input, ITensor *output, float beta)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
 
-    // Create intermediate tensors shapes
-    TensorInfo tensor_info_tmp(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
-    _tmp.allocator()->init(tensor_info_tmp);
+    // Configure Kernels
+    _max_kernel.configure(input, &_max);
+    _fill_border_kernel.configure(input, _max_kernel.border_size(), BorderMode::REPLICATE);
+    _softmax_kernel.configure(input, &_max, output, beta, &_tmp);
 
-    TensorShape shape = input->info()->tensor_shape();
-    shape.set(0, 1);
-    TensorInfo tensor_info_max_sum(shape, input->info()->num_channels(), input->info()->data_type(), input->info()->fixed_point_position());
-    _max.allocator()->init(tensor_info_max_sum);
-    _sum.allocator()->init(tensor_info_max_sum);
+    // Init intermediate tensors
+    _max.allocator()->init(*_max.info());
+    _tmp.allocator()->init(*_tmp.info());
 
     // Manage intermediate buffers
-    _memory_group.manage(&_tmp);
     _memory_group.manage(&_max);
-    _memory_group.manage(&_sum);
-
-    // Configure Kernels
-    _max_kernel.configure(input, &_max);
-    _shift_exp_sum_kernel.configure(input, &_max, &_tmp, &_sum, beta);
-    _norm_kernel.configure(&_tmp, &_sum, output);
-    _fill_border_kernel.configure(input, _max_kernel.border_size(), BorderMode::REPLICATE);
+    _memory_group.manage(&_tmp);
 
     // Allocate intermediate tensors
-    _tmp.allocator()->allocate();
     _max.allocator()->allocate();
-    _sum.allocator()->allocate();
+    _tmp.allocator()->allocate();
 }
 
 Status NESoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
@@ -72,14 +63,12 @@ Status NESoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float beta)
     // Perform validation step
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
 
-    TensorShape max_sum_shape = input->tensor_shape();
-    max_sum_shape.set(0, 1);
-
-    TensorInfo tensor_info_max_sum(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(max_sum_shape));
+    const TensorShape max_shape           = TensorShape(input->tensor_shape()).set(0, 1);
+    const TensorInfo  tensor_info_max_sum = TensorInfo(*input).set_tensor_shape(max_shape).reset_padding();
+    const TensorInfo  dont_care;
 
     ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DMaxKernel::validate(input, &tensor_info_max_sum));
-    ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DShiftExpSumKernel::validate(input, &tensor_info_max_sum, input, &tensor_info_max_sum, beta));
-    ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DNormKernel::validate(input, &tensor_info_max_sum, output));
+    ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DSoftmaxKernel::validate(input, &tensor_info_max_sum, output, beta, &dont_care));
 
     return Status{};
 }
@@ -90,8 +79,7 @@ void NESoftmaxLayer::run()
 
     NEScheduler::get().schedule(&_fill_border_kernel, Window::DimY);
     NEScheduler::get().schedule(&_max_kernel, Window::DimY);
-    NEScheduler::get().schedule(&_shift_exp_sum_kernel, Window::DimY);
-    NEScheduler::get().schedule(&_norm_kernel, Window::DimY);
+    NEScheduler::get().schedule(&_softmax_kernel, Window::DimY);
 
     _memory_group.release();
 }
-- 
cgit v1.2.1
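
For context only, not part of the patch above: a minimal usage sketch of NESoftmaxLayer on a QASYMM8 tensor, assuming the standard arm_compute runtime API of this era. The tensor shape and input quantization parameters are illustrative assumptions; the output quantization of (1/256, 0) is the scale/offset conventionally used for a quantized softmax output.

// Hypothetical usage sketch (not part of the patch): running the NEON softmax
// on a 1D QASYMM8 tensor of logits.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative logits vector of length 256; scale/offset are assumptions.
    TensorInfo input_info(TensorShape(256U), 1, DataType::QASYMM8);
    input_info.set_quantization_info(QuantizationInfo(0.1f, 128));
    TensorInfo output_info(TensorShape(256U), 1, DataType::QASYMM8);
    output_info.set_quantization_info(QuantizationInfo(1.f / 256, 0));

    Tensor input, output;
    input.allocator()->init(input_info);
    output.allocator()->init(output_info);

    // Optional static check before configuring, as exercised by the patch.
    const Status status = NESoftmaxLayer::validate(input.info(), output.info(), 1.0f);

    NESoftmaxLayer softmax;
    softmax.configure(&input, &output, /* beta */ 1.0f);

    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill the input buffer with quantized logits ...

    softmax.run();
    return status ? 0 : 1;
}

As the diff shows, the former shift/exp/sum and norm kernels are folded into a single NELogits1DSoftmaxKernel, so run() now schedules three kernels (border fill, 1D max, softmax) instead of four.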