Diffstat (limited to 'src/runtime')
-rw-r--r--   src/runtime/CL/functions/CLSoftmaxLayer.cpp            | 62
-rw-r--r--   src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp  | 14
-rw-r--r--   src/runtime/NEON/functions/NESoftmaxLayer.cpp          | 58
3 files changed, 73 insertions, 61 deletions
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index b0b2117cd9..71ccf9fa01 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -42,35 +42,38 @@ CLSoftmaxLayerGeneric<IS_LOG>::CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryMana
 }
 
 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const ICLTensor *input, const ICLTensor *output, size_t axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const ICLTensor *input, const ICLTensor *output, size_t first_n_reduce_axes)
 {
-    configure_reshape_input_kernel(CLKernelLibrary::get().get_compile_context(), input, output, axis);
+    configure_reshape_input_kernel(CLKernelLibrary::get().get_compile_context(), input, output, first_n_reduce_axes);
 }
 
 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *output, size_t axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *output, size_t first_n_reduce_axes)
 {
     // Flatten the input
-    const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input->info(), axis);
+    const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input->info(), first_n_reduce_axes);
 
     // Initialize the flat input
     _input_flattened.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
 
     // If we need to flatten the input, we can use CLFlattenKernel or CLReshapeKernel
-    // If flattening on the third axes, we use CLFlattenKernel.
+    // If the number of reduced axes is 3 (max dimension), which means collapsing all axes except the batch axis, we use CLFlattenKernel.
     // In all other cases we have to use CLReshapeKernel
-    if(axis != 3)
-    {
-        auto reshape_kernel_ptr = support::cpp14::make_unique<CLReshapeLayerKernel>();
-        reshape_kernel_ptr->configure(compile_context, input, &_input_flattened);
-        _flatten_kernel_ptr = std::move(reshape_kernel_ptr);
-    }
-    else
+    // Note that the "other cases" include both:
+    // 1. first_n_reduce_axes < 3: Reduce the first 1 (no need to reduce) or 2 dimensions (inclusive)
+    // 2. first_n_reduce_axes == 4: Reduce all 4 dimensions. This can only be handled by CLReshapeKernel instead of CLFlattenKernel.
+    if(first_n_reduce_axes == 3)
     {
         auto flatten_kernel_ptr = support::cpp14::make_unique<CLFlattenLayerKernel>();
         flatten_kernel_ptr->configure(compile_context, input, &_input_flattened);
         _flatten_kernel_ptr = std::move(flatten_kernel_ptr);
     }
+    else
+    {
+        auto reshape_kernel_ptr = support::cpp14::make_unique<CLReshapeLayerKernel>();
+        reshape_kernel_ptr->configure(compile_context, input, &_input_flattened);
+        _flatten_kernel_ptr = std::move(reshape_kernel_ptr);
+    }
 
     // We need to init the output tensor here. Indeed, the reshape kernel expects
     // both tensors to be already initialized
@@ -78,20 +81,23 @@ void CLSoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const CLCompi
 }
 
 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor *output, float beta, size_t axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor *output, float beta, size_t reduce_end_axis)
 {
-    configure(CLKernelLibrary::get().get_compile_context(), input, output, beta, axis);
+    configure(CLKernelLibrary::get().get_compile_context(), input, output, beta, reduce_end_axis);
 }
 
 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, size_t axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, size_t reduce_end_axis)
 {
     // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, axis));
+    ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, reduce_end_axis));
 
-    // We don't need flattening only in the case the input is 2D and axis is 1
-    _needs_flattening = axis != 1;
+    // Convert reduce-before axis (inclusive) to first n axes to reduce
+    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, input->info()->num_dimensions());
+
+    // We only need flattening when the number of axes to reduce is greater than 1
+    _needs_flattening = first_n_reduce_axes > 1;
 
     // If we are dealing with a 4D tensor, we will:
     // - Flatten the input, so that we end up with a [width*height*depth] * batches 2D tensor
@@ -102,8 +108,8 @@ void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_co
         // Add to the memory manager _input_flattened
         _memory_group.manage(&_input_flattened);
 
-        // Cofigure _flatten_kernel and _input_flattened
-        configure_reshape_input_kernel(input, output, axis);
+        // Cofigure _flatten_kernel and _input_flattened
+        configure_reshape_input_kernel(input, output, first_n_reduce_axes);
     }
 
     // We want to deal with a 2D input. Either it is the flattened version of the original input (4D case)
@@ -165,11 +171,15 @@ void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_co
 }
 
 template <bool IS_LOG>
-Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, size_t axis)
+Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, size_t reduce_end_axis)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
     ARM_COMPUTE_UNUSED(beta);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() <= reduce_end_axis);
+
+    // Convert reduce-before axis (inclusive) to first n axes to reduce
+    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, input->num_dimensions());
 
     // Create intermediate tensor info
     DataType tmp_data_type = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
@@ -180,20 +190,20 @@ Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const I
     TensorInfo tensor_info_max(input->clone()->set_tensor_shape(max_sum_shape).set_is_resizable(true));
     TensorInfo tensor_info_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(QuantizationInfo()).set_is_resizable(true));
 
-    const bool needs_flattening = (axis != 1);
+    const bool needs_flattening = (first_n_reduce_axes > 1);
 
     if(needs_flattening)
     {
-        const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input, axis);
+        const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input, first_n_reduce_axes);
         TensorInfo tensor_info_flat(input->clone()->set_tensor_shape(shape_flatten).set_is_resizable(true));
 
-        if(axis != 3)
+        if(first_n_reduce_axes == 3)
         {
-            ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(input, &tensor_info_flat));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayerKernel::validate(input, &tensor_info_flat));
         }
         else
        {
-            ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayerKernel::validate(input, &tensor_info_flat));
+            ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(input, &tensor_info_flat));
         }
     }
 
diff --git a/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp b/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp
index 0645ae7f8f..659d0eb57f 100644
--- a/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp
+++ b/src/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,20 +27,20 @@
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/runtime/GLES_COMPUTE/GCScheduler.h"
 
-using namespace arm_compute;
-
+namespace arm_compute
+{
 GCSoftmaxLayer::GCSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)), _max_kernel(), _shift_exp_sum_kernel(), _norm_kernel(), _max(), _sum(), _tmp()
 {
 }
 
-void GCSoftmaxLayer::configure(const IGCTensor *input, IGCTensor *output, float beta, size_t axis)
+void GCSoftmaxLayer::configure(const IGCTensor *input, IGCTensor *output, float beta, size_t reduce_end_axis)
 {
-    ARM_COMPUTE_UNUSED(beta, axis);
+    ARM_COMPUTE_UNUSED(beta, reduce_end_axis);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON(beta != 1.0f);
-    ARM_COMPUTE_ERROR_ON_MSG(axis != 1, "Axis must be 1 for GLES");
+    ARM_COMPUTE_ERROR_ON_MSG(reduce_end_axis != 0, "Reduce_end_axis must be 0 for GLES");
 
     // Create intermediate tensors shapes
     _tmp.allocator()->init(TensorInfo(input->info()->tensor_shape(), input->info()->num_channels(), input->info()->data_type()));
@@ -77,3 +77,5 @@ void GCSoftmaxLayer::run()
     GCScheduler::get().memory_barrier();
     GCScheduler::get().dispatch(_norm_kernel);
 }
+
+} // namespace arm_compute
\ No newline at end of file
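Editorial note on the CL and GLES hunks above: the public argument changes from an axis index to reduce_end_axis, an inclusive end index that is converted into the number of leading axes to collapse (first_n_reduce_axes). Below is a minimal, self-contained C++ sketch of that conversion, assuming the semantics implied by the hunks (for example, the GLES check moving from "axis != 1" to "reduce_end_axis != 0"); the helper names are invented for illustration and this is not the library's dim_index_2_num_dims implementation.

// Illustrative only: hypothetical helpers mirroring the reduce_end_axis ->
// first_n_reduce_axes conversion suggested by the diff, not arm_compute code.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Wrap a possibly negative axis into [0, num_dims); negative indices count
// from the end, the convention the old NEON code handled via wrap_around.
static int32_t wrap_axis(int32_t axis, int32_t num_dims)
{
    return (axis < 0) ? axis + num_dims : axis;
}

// Convert an inclusive "reduce end axis" into the number of leading axes to
// collapse, i.e. the first_n_reduce_axes value used by the new code paths.
static size_t to_first_n_reduce_axes(int32_t reduce_end_axis, int32_t num_dims)
{
    const int32_t wrapped = wrap_axis(reduce_end_axis, num_dims);
    assert(wrapped >= 0 && wrapped < num_dims);
    return static_cast<size_t>(wrapped) + 1;
}

int main()
{
    // 4D input: reduce_end_axis == 0 reduces only the innermost axis
    // (no flattening needed, matching the GLES restriction above).
    std::printf("%zu\n", to_first_n_reduce_axes(0, 4));  // 1
    // reduce_end_axis == 2 collapses the first three axes (flatten path).
    std::printf("%zu\n", to_first_n_reduce_axes(2, 4));  // 3
    // Negative indexing: -1 collapses all four axes (reshape path).
    std::printf("%zu\n", to_first_n_reduce_axes(-1, 4)); // 4
    return 0;
}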
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 57d75af779..5509edec87 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -27,9 +27,6 @@
 #include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "utils/TypePrinter.h"
-
-#include <cfloat>
 
 namespace arm_compute
 {
@@ -41,29 +38,32 @@ NESoftmaxLayerGeneric<IS_LOG>::NESoftmaxLayerGeneric(std::shared_ptr<IMemoryMana
 }
 
 template <bool IS_LOG>
-void NESoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const ITensor *input, const ITensor *output, int32_t axis)
+void NESoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const ITensor *input, const ITensor *output, int32_t first_n_reduce_axes)
 {
     // Flatten the input
-    const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input->info(), axis);
+    const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input->info(), first_n_reduce_axes);
 
     // Initialize the flat input
     _input_flattened.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
 
     // If we need to flatten the input, we can use NEFlattenKernel or NEReshapeKernel
-    // If flattening on the third axes, we use NEFlattenKernel.
+    // If the number of reduced axes is 3 (max dimension), which means collapsing all axes except the batch axis, we use NEFlattenKernel.
     // In all other cases we have to use NEReshapeKernel
-    if(axis != 3)
-    {
-        auto reshape_kernel_ptr = support::cpp14::make_unique<NEReshapeLayerKernel>();
-        reshape_kernel_ptr->configure(input, &_input_flattened);
-        _flat_or_reshape_kernel_ptr = std::move(reshape_kernel_ptr);
-    }
-    else
+    // Note that the "other cases" include both:
+    // 1. first_n_reduce_axes < 3: Reduce the first 1 (no need to reduce) or 2 dimensions (inclusive)
+    // 2. first_n_reduce_axes == 4: Reduce all 4 dimensions. This can only be handled by NEReshapeKernel instead of NEFlattenKernel.
+    if(first_n_reduce_axes == 3)
     {
         auto flatten_kernel_ptr = support::cpp14::make_unique<NEFlattenLayerKernel>();
         flatten_kernel_ptr->configure(input, &_input_flattened);
         _flat_or_reshape_kernel_ptr = std::move(flatten_kernel_ptr);
     }
+    else
+    {
+        auto reshape_kernel_ptr = support::cpp14::make_unique<NEReshapeLayerKernel>();
+        reshape_kernel_ptr->configure(input, &_input_flattened);
+        _flat_or_reshape_kernel_ptr = std::move(reshape_kernel_ptr);
+    }
 
     // We need to init the output tensor here. Indeed, the reshape kernel expects
     // both tensors to be already initialized
@@ -71,17 +71,17 @@ void NESoftmaxLayerGeneric<IS_LOG>::configure_reshape_input_kernel(const ITensor
 }
 
 template <bool IS_LOG>
-void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, float beta, int32_t axis)
+void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, float beta, int32_t reduce_end_axis)
 {
     // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(NESoftmaxLayerGeneric::validate(input->info(), output->info(), beta, axis));
+    ARM_COMPUTE_ERROR_THROW_ON(NESoftmaxLayerGeneric::validate(input->info(), output->info(), beta, reduce_end_axis));
 
-    // Handle negative axis, negative index is used to specify axis from the end (e.g. -1 for the last axis).
-    axis = wrap_around(axis, static_cast<int32_t>(input->info()->num_dimensions()));
+    // Convert reduce-before axis (inclusive) to first n axes to reduce
+    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(input->info()->num_dimensions()));
 
-    // We don't need flattening only in the case the input is 2D and axis is 1
-    _needs_flattening = axis != 1;
+    // We only need flattening when the number of axes to reduce is greater than 1
+    _needs_flattening = first_n_reduce_axes > 1;
 
     // If we are dealing with a 4D tensor, we will:
    // - Flatten the input, so that we end up with a [width*height*depth] * batches 2D tensor
@@ -93,7 +93,7 @@ void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, f
         _memory_group.manage(&_input_flattened);
 
         // Configure _flatten_kernel and _input_flattened
-        configure_reshape_input_kernel(input, output, axis);
+        configure_reshape_input_kernel(input, output, first_n_reduce_axes);
     }
 
     // We want to deal with a 2D input. Either it is the flattened version of the original input (4D case)
@@ -145,16 +145,16 @@ void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, f
 }
 
 template <bool IS_LOG>
-Status NESoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
+Status NESoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t reduce_end_axis)
 {
     // Perform validation step
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
     ARM_COMPUTE_UNUSED(beta);
-    ARM_COMPUTE_RETURN_ERROR_ON(axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= axis);
+    ARM_COMPUTE_RETURN_ERROR_ON(reduce_end_axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= reduce_end_axis);
 
-    // Handle negative axis, negative index is used to specify axis from the end (e.g. -1 for the last axis).
-    axis = wrap_around(axis, static_cast<int32_t>(input->num_dimensions()));
+    // Convert reduce-before axis (inclusive) to first n axes to reduce
+    size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(input->num_dimensions()));
 
     // Create intermediate tensor info
     DataType tmp_data_type = input->data_type();
@@ -165,20 +165,20 @@ Status NESoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const I
     const TensorInfo tensor_info_max_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(input->quantization_info()).set_is_resizable(true));
     const TensorInfo dont_care;
 
-    const bool needs_flattening = (axis != 1);
+    const bool needs_flattening = (first_n_reduce_axes > 1);
 
     if(needs_flattening)
     {
-        const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input, axis);
+        const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input, first_n_reduce_axes);
         TensorInfo tensor_info_flat(input->clone()->set_tensor_shape(shape_flatten).set_is_resizable(true));
 
-        if(axis != 3)
+        if(first_n_reduce_axes == 3)
         {
-            ARM_COMPUTE_RETURN_ON_ERROR(NEReshapeLayerKernel::validate(input, &tensor_info_flat));
+            ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &tensor_info_flat));
         }
         else
        {
-            ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &tensor_info_flat));
+            ARM_COMPUTE_RETURN_ON_ERROR(NEReshapeLayerKernel::validate(input, &tensor_info_flat));
         }
     }
 
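Across all three backends, the flatten-versus-reshape choice now keys off first_n_reduce_axes rather than the raw axis. The sketch below restates that selection in isolation, assuming the conditions visible in the hunks above; the enum and function names are hypothetical and only mirror the branch conditions, not the actual configuration of CL/NEFlattenLayerKernel and CL/NEReshapeLayerKernel.

// Hypothetical dispatch sketch; kernel choices are placeholders, not ACL classes.
#include <cstddef>
#include <cstdio>

enum class FlattenKernelChoice
{
    None,    // first_n_reduce_axes == 1: reduction along a single axis, no flattening
    Flatten, // first_n_reduce_axes == 3: collapse everything except the batch axis
    Reshape  // remaining cases, including first_n_reduce_axes == 4 (collapse all axes)
};

// Mirrors the patched logic: flattening is only needed when more than one leading
// axis is reduced, and the Flatten kernel is only usable when exactly three collapse.
static FlattenKernelChoice choose_kernel(size_t first_n_reduce_axes)
{
    if(first_n_reduce_axes <= 1)
    {
        return FlattenKernelChoice::None;
    }
    return (first_n_reduce_axes == 3) ? FlattenKernelChoice::Flatten
                                      : FlattenKernelChoice::Reshape;
}

int main()
{
    // Print the choice for every legal count on a 4D input.
    for(size_t n = 1; n <= 4; ++n)
    {
        std::printf("first_n_reduce_axes=%zu -> %d\n", n, static_cast<int>(choose_kernel(n)));
    }
    return 0;
}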