author    giuros01 <giuseppe.rossini@arm.com>    2018-09-03 09:53:53 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:54:54 +0000
commit    efbf6c8fd54159b26eda43eea7a12fce491ca13a (patch)
tree      f24f63d73703ddcb5fe0ea3ccef101660a9eb9a4 /src/runtime/CL/functions/CLSoftmaxLayer.cpp
parent    477531c258801caf3cce44eb3e43df611b42fc6d (diff)
download  ComputeLibrary-efbf6c8fd54159b26eda43eea7a12fce491ca13a.tar.gz
[COMPMID-386] Github: Support SoftmaxLayer on different number of dimensions?
Change-Id: I7422b977538ff29930a90f078badc2edee78af93
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/146638
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/runtime/CL/functions/CLSoftmaxLayer.cpp')
-rw-r--r--  src/runtime/CL/functions/CLSoftmaxLayer.cpp  |  83
1 file changed, 57 insertions(+), 26 deletions(-)
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index 3a7d6c770b..d6718467d5 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -36,34 +36,48 @@
namespace arm_compute
{
CLSoftmaxLayer::CLSoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _max_shift_exp_sum_kernel(), _norm_kernel(), _flatten_kernel(), _reshape_kernel(), _max(), _sum(), _tmp(), _input_flat(), _output_flat(),
+ : _memory_group(std::move(memory_manager)), _max_shift_exp_sum_kernel(), _norm_kernel(), _flatten_kernel_ptr(), _reshape_kernel(), _max(), _sum(), _tmp(), _input_flattened(), _output_flattened(),
_needs_flattening(false)
{
}
-void CLSoftmaxLayer::configure_flatten_kernel(const ICLTensor *input, const ICLTensor *output)
+void CLSoftmaxLayer::configure_reshape_input_kernel(const ICLTensor *input, const ICLTensor *output, size_t axis)
{
// Flatten the input
- const TensorShape shape_flatten = misc::shape_calculator::compute_flatten_shape(input->info());
+ const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input->info(), axis);
// Initialize the flat input
- _input_flat.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
+ _input_flattened.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
- // Configure the flatten_kernel
- _flatten_kernel.configure(input, &_input_flat);
+ // If we need to flatten the input, we can use CLFlattenLayerKernel or CLReshapeLayerKernel
+ // If flattening on the third axis, we use CLFlattenLayerKernel.
+ // In all other cases we have to use CLReshapeLayerKernel
+ if(axis != 3)
+ {
+ auto reshape_kernel_ptr = support::cpp14::make_unique<CLReshapeLayerKernel>();
+ reshape_kernel_ptr->configure(input, &_input_flattened);
+ _flatten_kernel_ptr = std::move(reshape_kernel_ptr);
+ }
+ else
+ {
+ auto flatten_kernel_ptr = support::cpp14::make_unique<CLFlattenLayerKernel>();
+ flatten_kernel_ptr->configure(input, &_input_flattened);
+ _flatten_kernel_ptr = std::move(flatten_kernel_ptr);
+ }
// We need to init the output tensor here. Indeed, the reshape kernel expects
// both tensors to be already initialized
auto_init_if_empty(*output->info(), *input->info()->clone());
}
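The 2D collapse itself is delegated to misc::shape_calculator::compute_softmax_shape. As a minimal standalone sketch of the presumed rule (illustrative only, not the library implementation; the helper name collapse_for_softmax is invented), every dimension below the axis folds into the first output dimension and the remaining ones into the second:

    #include <cstddef>
    #include <vector>

    // Illustrative stand-in for misc::shape_calculator::compute_softmax_shape.
    // Assumption: [x, y, z, w] with axis = 2 collapses to [x*y, z*w], so that
    // axis = 3 yields the [width*height*depth, batches] shape mentioned below.
    std::vector<std::size_t> collapse_for_softmax(const std::vector<std::size_t> &shape, std::size_t axis)
    {
        std::size_t inner = 1; // product of the dimensions below the axis
        std::size_t outer = 1; // product of the dimensions from the axis upwards
        for(std::size_t d = 0; d < shape.size(); ++d)
        {
            (d < axis ? inner : outer) *= shape[d];
        }
        return { inner, outer };
    }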
-void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float beta)
+void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float beta, size_t axis)
{
// Perform validation step
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayer::validate(input->info(), output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayer::validate(input->info(), output->info(), beta, axis));
- _needs_flattening = input->info()->num_dimensions() > 2;
+ // Flattening is only skipped when the axis is 1, in which case the kernels can operate on the input directly
+ _needs_flattening = axis != 1;
// If we are dealing with a 4D tensor, we will:
// - Flatten the input, so that we end up with a [width*height*depth] * batches 2D tensor
@@ -71,16 +85,16 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float
// - Reshape the flattened output into the real output
if(_needs_flattening)
{
- // Add to the memory manager _input_flat
- _memory_group.manage(&_input_flat);
+ // Add to the memory manager _input_flattened
+ _memory_group.manage(&_input_flattened);
- // Cofigure _flatten_kernel and _input_flat
- configure_flatten_kernel(input, output);
+ // Configure _flatten_kernel_ptr and _input_flattened
+ configure_reshape_input_kernel(input, output, axis);
}
// We want to deal with a 2D input. Either it is the flattened version of the original input (4D case)
// or it is the original input itself (2D case)
- const ICLTensor *input_2D = (_needs_flattening ? &_input_flat : input);
+ const ICLTensor *input_2D = (_needs_flattening ? &_input_flattened : input);
// Create intermediate tensors shapes
TensorInfo input_info = input_2D->info()->clone()->reset_padding().set_is_resizable(true);
@@ -106,18 +120,18 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float
if(_needs_flattening)
{
- // Add to the memory manager _output_flat
- _memory_group.manage(&_output_flat);
+ // Add to the memory manager _output_flattened
+ _memory_group.manage(&_output_flattened);
// The normalization kernel stores the result in a flat output tensor
- _norm_kernel.configure(&_tmp, &_sum, &_output_flat, beta);
+ _norm_kernel.configure(&_tmp, &_sum, &_output_flattened, beta);
// Reshape the flat output into the requested (4D) output
- _reshape_kernel.configure(&_output_flat, output);
+ _reshape_kernel.configure(&_output_flattened, output);
// Allocate the intermediate flat tensors
- _input_flat.allocator()->allocate();
- _output_flat.allocator()->allocate();
+ _input_flattened.allocator()->allocate();
+ _output_flattened.allocator()->allocate();
}
else
{
@@ -131,10 +145,11 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float
_sum.allocator()->allocate();
}
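Taken together, whenever axis != 1 configure() builds a three-stage chain: flatten (or reshape) the input to 2D, run the 1D softmax kernels, and reshape the result back to the original shape. A minimal usage sketch against the post-commit signature (the 4D shape and the axis value are invented for illustration):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Hypothetical 4D input/output pair; with axis = 2 the function takes
        // the flatten -> softmax -> reshape path configured above.
        CLTensor input, output;
        input.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U, 2U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U, 2U), 1, DataType::F32));

        CLSoftmaxLayer softmax;
        softmax.configure(&input, &output, 1.0f /* beta */, 2 /* axis */);

        input.allocator()->allocate();
        output.allocator()->allocate();

        softmax.run();
        CLScheduler::get().sync();
        return 0;
    }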
-Status CLSoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLSoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, size_t axis)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
+ ARM_COMPUTE_UNUSED(beta);
// Create intermediate tensor info
DataType tmp_data_type = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
@@ -145,26 +160,42 @@ Status CLSoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *out
TensorInfo tensor_info_max(input->clone()->set_tensor_shape(max_sum_shape).set_is_resizable(true));
TensorInfo tensor_info_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(QuantizationInfo()).set_is_resizable(true));
- const TensorShape shape_flatten = misc::shape_calculator::compute_flatten_shape(input);
- TensorInfo tensor_info_flat(input->clone()->set_tensor_shape(shape_flatten).set_is_resizable(true));
+ const bool needs_flattening = (axis != 1);
- if(input->num_dimensions() > 2) // needs flattening
+ if(needs_flattening)
{
- ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayerKernel::validate(input, &tensor_info_flat));
+ const TensorShape shape_flatten = misc::shape_calculator::compute_softmax_shape(input, axis);
+ TensorInfo tensor_info_flat(input->clone()->set_tensor_shape(shape_flatten).set_is_resizable(true));
+
+ if(axis != 3)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(input, &tensor_info_flat));
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLFlattenLayerKernel::validate(input, &tensor_info_flat));
+ }
}
ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DMaxShiftExpSumKernel::validate(input, &tensor_info_max, &tensor_info_tmp, &tensor_info_sum));
ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DNormKernel::validate(&tensor_info_tmp, &tensor_info_sum, output));
+
return Status{};
}
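Since validate() mirrors the kernel selection made in configure(), it can serve as a dry run before any resources are committed. A hedged sketch of that pattern (the helper softmax_supported is hypothetical):

    #include <cstddef>
    #include <iostream>

    #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

    using namespace arm_compute;

    // Hypothetical helper: probe support before configuring the function.
    bool softmax_supported(const ITensorInfo *in, const ITensorInfo *out, float beta, std::size_t axis)
    {
        const Status s = CLSoftmaxLayer::validate(in, out, beta, axis);
        if(!bool(s))
        {
            // e.g. "Only up to 4 dimensions are supported"
            std::cerr << s.error_description() << std::endl;
            return false;
        }
        return true;
    }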
void CLSoftmaxLayer::run()
{
_memory_group.acquire();
+
if(_needs_flattening)
{
- CLScheduler::get().enqueue(_flatten_kernel, false);
+ CLScheduler::get().enqueue(*_flatten_kernel_ptr, false);
}
CLScheduler::get().enqueue(_max_shift_exp_sum_kernel, false);