author    Sang-Hoon Park <sang-hoon.park@arm.com>    2020-07-07 09:36:09 +0100
committer Sang-Hoon Park <sang-hoon.park@arm.com>    2020-07-08 10:00:17 +0000
commit    a0205b987509d239b1635024fe8f334a4534f56e (patch)
tree      b1751b74d14073d2c711de486d3f0efb1a4d2c6c /src
parent    f9b595adbdc3f6f51ffa2c1f2aa70d0262d0db2d (diff)
COMPMID-3574: add logarithm to LogSoftmaxLayer
The missing logarithm of the summation is added to the NEON, CL and
reference backends. To avoid complex changes, the log softmax layer on
the CL backend doesn't support quantized data types. Tests and Doxygen
comments are updated accordingly.

Change-Id: Iafd29291be8b81345cb4999b2668dbc3ae0c3345
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3517
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
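For background, log softmax differs from softmax only in the final normalization step: instead of dividing each exponential by the accumulated sum, it subtracts the logarithm of that sum, i.e. log_softmax(x_i) = (x_i - max) - log(sum_j exp(x_j - max)). The sketch below illustrates the intended result at the reference level; it is a minimal scalar illustration, not code from this patch.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Minimal scalar sketch of the behaviour this patch fixes (illustration only,
// not part of the library):
//   softmax(x_i)     = exp(x_i - max) / sum_j exp(x_j - max)
//   log_softmax(x_i) = (x_i - max) - log(sum_j exp(x_j - max))
// The bug was that the final step subtracted the sum itself instead of its logarithm.
std::vector<float> log_softmax_reference(const std::vector<float> &x)
{
    const float max_val = *std::max_element(x.begin(), x.end());
    float       sum     = 0.f;
    for(float v : x)
    {
        sum += std::exp(v - max_val);
    }
    const float log_sum = std::log(sum); // the previously missing logarithm
    std::vector<float> out(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        out[i] = (x[i] - max_val) - log_sum;
    }
    return out;
}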
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/cl_kernels/softmax_layer.cl            |  3 ++-
-rw-r--r--  src/core/CL/cl_kernels/softmax_layer_quantized.cl  | 13 +++----------
-rw-r--r--  src/core/CL/kernels/CLSoftmaxLayerKernel.cpp       |  1 +
-rw-r--r--  src/core/NEON/kernels/NESoftmaxLayerKernel.cpp     |  8 ++++++++
4 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/src/core/CL/cl_kernels/softmax_layer.cl b/src/core/CL/cl_kernels/softmax_layer.cl
index 767cf4c4f7..0c9f8c1c66 100644
--- a/src/core/CL/cl_kernels/softmax_layer.cl
+++ b/src/core/CL/cl_kernels/softmax_layer.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -112,6 +112,7 @@ __kernel void softmax_layer_norm(
     VEC_DATA_TYPE(DATA_TYPE, 16)
     data = vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0));
 #ifdef LOG_SOFTMAX
+    sum_val = log(sum_val);
     vstore16(SUB_OP(data, sum_val, DATA_TYPE, 16), 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
 #else /* LOG_SOFTMAX */
     vstore16(DIV_OP(data, sum_val, DATA_TYPE, 16), 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
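A note on the one-line kernel fix above: at this point data already holds the max-shifted (and beta-scaled) input written by the earlier max/shift/exp pass, while sum_val holds the sum of its exponentials, so taking log(sum_val) once lets the existing SUB_OP produce log-softmax values, since log(exp(d) / s) = d - log(s). A tiny standalone check of that identity (values arbitrary, illustration only):

#include <cassert>
#include <cmath>

// Subtracting log(sum) in log space matches dividing by sum in linear space.
int main()
{
    const float d   = -1.7f; // a max-shifted input, x_i - max
    const float sum = 5.3f;  // sum of exp(x_j - max) over the row
    assert(std::fabs(std::log(std::exp(d) / sum) - (d - std::log(sum))) < 1e-6f);
    return 0;
}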
diff --git a/src/core/CL/cl_kernels/softmax_layer_quantized.cl b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
index 5d35e50b1f..81e7b896d5 100644
--- a/src/core/CL/cl_kernels/softmax_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -570,14 +570,12 @@ __kernel void softmax_layer_norm_quantized(
     int sum_val = *((__global int *)offset(&sum, 0, get_global_id(1)));
 
     // It will be better to calculate this in prev layer and pass here as parameter
-#ifndef LOG_SOFTMAX
     uint sum_val_u = convert_uint(sum_val);
     int headroom_plus_one = clz(sum_val_u);
     int num_bits_over_unit = EXP_ACCUMULATION_INT_BITS - headroom_plus_one;
     int shifted_sum_minus_one_1 = convert_int((sum_val_u << headroom_plus_one) - (1u << 31));
     int16 shifted_sum_minus_one = shifted_sum_minus_one_1;
     int16 shifted_scale = ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(shifted_sum_minus_one, 16);
-#endif /* LOG_SOFTMAX */
 
     // It was already calculated in prev layer, should be stored into tmp output and reused
     int16 data_diff = vload16(0, (__global int *)offset(&src, 0, 0));
@@ -589,18 +587,13 @@ __kernel void softmax_layer_norm_quantized(
     }
 #endif /* defined(INPUT_BETA_MULTIPLIER) && defined(INPUT_BETA_LEFT_SHIFT) */
 
-#ifdef LOG_SOFTMAX
-    long16 data = SUB_OP(convert_long16(data_diff_mult), (long16)(sum_val), long, 16);
-    data        = select(0L, data, convert_long16(data_diff) >= (long16)(DIFF_MIN));
-#else /* LOG_SOFTMAX */
     int16 data = ASYMM_EXP_ON_NEGATIVE_VALUES(data_diff_mult, SCALED_DIFF_INT_BITS, 16);
     data       = ASYMM_MULT(shifted_scale, data, 16);
     data       = ASYMM_ROUNDING_DIVIDE_BY_POW2(data, num_bits_over_unit + 31 - 8, 16);
 #ifdef QASYMM8_SIGNED
-    data = ADD_OP(data, (int16)(MIN_VALUE), int, 16);
+    data       = ADD_OP(data, (int16)(MIN_VALUE), int, 16);
 #endif /* QASYMM8_SIGNED */
-    data = select(MIN_VALUE, data, data_diff >= (int16)(DIFF_MIN));
-#endif /* LOG_SOFTMAX */
+    data       = select(MIN_VALUE, data, data_diff >= (int16)(DIFF_MIN));
 
     vstore16(CONVERT_SAT(data, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
 }
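The declarations retained above (sum_val_u through shifted_scale) implement the quantized reciprocal that the normalization path always takes now: clz finds the headroom of the integer sum, the shift renormalizes it so it reads as 1 + x in Q31 with x in [0, 1), ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1 approximates 1/(1 + x), and the stripped power of two is folded back in by the ROUNDING_DIVIDE_BY_POW2 step. A float-domain sketch of that decomposition (illustration only; __builtin_clz is the GCC/Clang counterpart of the OpenCL clz):

#include <cassert>
#include <cmath>
#include <cstdint>

int main()
{
    const std::uint32_t sum = 123456789u;          // an accumulated sum of exponentials
    const int headroom      = __builtin_clz(sum);  // headroom_plus_one in the kernel
    // Renormalize: sum = one_plus_x * 2^(31 - headroom), with one_plus_x in [1, 2)
    const double one_plus_x = std::ldexp(static_cast<double>(sum), headroom - 31);
    const double recip      = 1.0 / one_plus_x;    // what the Q31 routine approximates
    // Folding the power of two back in recovers 1/sum:
    assert(std::fabs(recip * std::ldexp(1.0, headroom - 31) - 1.0 / sum) < 1e-12);
    return 0;
}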
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
index 09deb94a85..85d70b04d0 100644
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
@@ -129,6 +129,7 @@ Status validate_arguments_1DNorm(const ITensorInfo *input, const ITensorInfo *su
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32, DataType::F16, DataType::F32);
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(sum, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum);
+    ARM_COMPUTE_RETURN_ERROR_ON(info.is_log && !is_data_type_float(info.input_data_type));
 
     // Note: output should always have a scale of 1/256 and offset 0
     const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
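The added ARM_COMPUTE_RETURN_ERROR_ON line is where quantized log softmax is switched off on the CL backend: with info.is_log set and a non-float input, validation fails before any kernel is configured. Below is a hedged sketch of the effect at the function level; the CLLogSoftmaxLayer::validate call with defaulted beta/axis arguments is assumed from this era of the library and may differ:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

using namespace arm_compute;

// Hedged sketch: probe whether CL log softmax accepts a given data type.
// After this patch, float types should validate and quantized ones should not.
bool cl_log_softmax_accepts(DataType dt)
{
    TensorInfo src(TensorShape(128U, 32U), 1, dt);
    TensorInfo dst(TensorShape(128U, 32U), 1, dt);
    // Status converts to bool: true on success, false with an error otherwise.
    return bool(CLLogSoftmaxLayer::validate(&src, &dst));
}
// cl_log_softmax_accepts(DataType::F32)     -> expected true
// cl_log_softmax_accepts(DataType::QASYMM8) -> expected false after this change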
diff --git a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
index 41bf03ad1d..35e5973aff 100644
--- a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
@@ -368,6 +368,10 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons
         {
             sum_inversed = 256.f / sum;
         }
+        else
+        {
+            sum = std::log(sum);
+        }
     }
 
     /* Normalize exponentials */
@@ -516,6 +520,10 @@ void logits_1d_softmax_float(const ITensor &in, const ITensor &max, void *const
         {
             sum_inversed = T(1) / sum;
         }
+        else
+        {
+            sum = static_cast<T>(std::log(sum));
+        }
     }
 
     /* Normalize exponentials */
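Both NEON kernels now follow the same scheme: the reduction pass leaves either 1/sum (softmax, via sum_inversed) or log(sum) (log softmax, via the new else branches) for the normalization pass that follows. A scalar sketch of that final pass; the real kernels vectorize it with NEON intrinsics, so this is an illustration only:

#include <cmath>
#include <cstddef>

// Scalar sketch of the normalization pass after this patch. For log softmax,
// tmp[i] holds the max-shifted input and "sum" has been replaced by log(sum);
// for plain softmax, tmp[i] holds exponentials and sum_inversed holds 1/sum.
template <typename T>
void normalize_row(const T *tmp, T *out, std::size_t len, bool is_log, T sum, T sum_inversed)
{
    for(std::size_t i = 0; i < len; ++i)
    {
        out[i] = is_log ? tmp[i] - sum : tmp[i] * sum_inversed;
    }
}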