about summary refs log tree commit diff
path: root/src/core/NEON/kernels/NEActivationLayerKernel.cpp
diff options
context:
space:
mode:
author: Michel Iwaniec <michel.iwaniec@arm.com> 2017-11-29 10:48:23 +0000
committer: Anthony Barbier <anthony.barbier@arm.com> 2018-11-02 16:42:17 +0000
commit: 5dfeae62f89eefdc241887c3e67cd1c04ec0b6a7 (patch)
tree: d6b5d40353aa68aeda803c809812fd6e208c3e7f /src/core/NEON/kernels/NEActivationLayerKernel.cpp
parent: 7f0f790ae7f5dd044a5d7564492583b8df974a11 (diff)
download: ComputeLibrary-5dfeae62f89eefdc241887c3e67cd1c04ec0b6a7.tar.gz
IVGCVSW-820: Add QASYMM8 support to NeonActivationLayerKernel
Change-Id: Ic3881e97b4fcbae0ac287a1e010cfc6f0fd8d7d1 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112139 Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com> Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/NEActivationLayerKernel.cpp')
-rw-r--r-- src/core/NEON/kernels/NEActivationLayerKernel.cpp | 56
1 file changed, 55 insertions(+), 1 deletion(-)
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 6ea504a173..9670b7798c 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -26,8 +26,10 @@
#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEAsymm.h"
#include "arm_compute/core/NEON/NEFixedPoint.h"
#include "arm_compute/core/NEON/NEMath.h"
+#include "arm_compute/core/QAsymm8.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
@@ -44,7 +46,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
@@ -107,6 +109,9 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr));
+ ARM_COMPUTE_ERROR_ON_MSG((input->info()->data_type() == DataType::QASYMM8) && (activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
+ "For QASYMM8 only lower/upper bounded relu is supported");
+
// Activation functions : FP32
static std::map<ActivationFunction, ActivationFunctionExecutorPtr> act_map_f32 =
{
@@ -170,9 +175,17 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::SQUARE, &NEActivationLayerKernel::activation<ActivationFunction::SQUARE, qint16_t> },
{ ActivationFunction::TANH, &NEActivationLayerKernel::activation<ActivationFunction::TANH, qint16_t> },
};
+ // Activation functions : QASYMM8
+ static std::map<ActivationFunction, ActivationFunctionExecutorPtr> act_map_qasymm8 =
+ {
+ { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LU_BOUNDED_RELU, qasymm8_t> },
+ };
switch(input->info()->data_type())
{
+ case DataType::QASYMM8:
+ _func = act_map_qasymm8[activation_info.activation()];
+ break;
case DataType::QS8:
_func = act_map_qs8[activation_info.activation()];
break;
@@ -546,6 +559,47 @@ typename std::enable_if<std::is_same<T, int8_t>::value, void>::type NEActivation
}
template <ActivationLayerInfo::ActivationFunction F, typename T>
+// QASYMM8 specialization: applies the activation function to asymmetric-quantized
+// uint8 data, 16 lanes per step (vld1q_u8 / vst1q_u8). Only LU_BOUNDED_RELU is
+// implemented; any other F hits ARM_COMPUTE_ERROR at run time.
+typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type NEActivationLayerKernel::activation(const Window &window)
+{
+ Iterator input(_input, window);
+ Iterator output(_output, window);
+ const QuantizationInfo qi_in = _input->info()->quantization_info();
+ const QuantizationInfo qi_out = _output->info()->quantization_info();
+ // Upper (a) and lower (b) clamp bounds, quantized into the *input* space so the
+ // min/max can operate directly on the raw uint8 lane values.
+ const qasymm8x16_t a = vdupq_n_u8(sqcvt_qasymm8_f32(_act_info.a(), qi_in.scale, qi_in.offset));
+ const qasymm8x16_t b = vdupq_n_u8(sqcvt_qasymm8_f32(_act_info.b(), qi_in.scale, qi_in.offset));
+ // Initialise scale/offset for re-quantization
+ // q_out = q_in * (scale_in / scale_out) + (offset_out - offset_in * scale_in / scale_out),
+ // i.e. dequantize with the input params and requantize with the output params in one
+ // fused multiply-add per lane.
+ float s = qi_in.scale / qi_out.scale;
+ float o = -qi_in.offset * s + qi_out.offset;
+ float32x4_t vs = vdupq_n_f32(s);
+ float32x4_t vo = vdupq_n_f32(o);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const auto input_ptr = reinterpret_cast<const qasymm8_t *>(input.ptr());
+ const auto output_ptr = reinterpret_cast<qasymm8_t *>(output.ptr());
+
+ // NOTE(review): no scalar tail handling here — presumably the window is configured
+ // elsewhere so every step covers exactly 16 elements; confirm in configure().
+ const qasymm8x16_t in = vld1q_u8(input_ptr);
+ qasymm8x16_t tmp = {};
+
+ switch(F)
+ {
+ case ActivationFunction::LU_BOUNDED_RELU:
+ // Perform activation
+ tmp = vminq_u8(a, vmaxq_u8(b, in));
+ // Re-quantize to new output space
+ // NOTE(review): vmlaq_qasymm8 is assumed to compute tmp * vs + vo lane-wise
+ // (widening to float32 and narrowing back to uint8) — confirm against NEAsymm.h.
+ tmp = vmlaq_qasymm8(tmp, vs, vo);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Function not implemented");
+ break;
+ }
+
+ vst1q_u8(output_ptr, tmp);
+ },
+ input, output);
+}
+
+template <ActivationLayerInfo::ActivationFunction F, typename T>
typename std::enable_if<std::is_same<T, qint16_t>::value, void>::type NEActivationLayerKernel::activation(const Window &window)
{
Iterator input(_input, window);