author		Michele Di Giorgio <michele.digiorgio@arm.com>	2018-01-23 16:55:24 +0000
committer	Anthony Barbier <anthony.barbier@arm.com>	2018-11-02 16:43:42 +0000
commit		dde3ad94dc11fae29dba862a1ad657f551f36763 (patch)
tree		b9fe87fb339326aaa68fde0a0b90996c4d2e3cc9 /src/core/NEON/kernels/NEActivationLayerKernel.cpp
parent		967f86dc0931aa1b59f477cb92911f7d06640c27 (diff)
download	ComputeLibrary-dde3ad94dc11fae29dba862a1ad657f551f36763.tar.gz
COMPMID-842: Add NEON QASYMM8 RELU Activation
Change-Id: I7197d2ad7ac08112eba1570a257ad011b1ce0b75
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/117404
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/NEON/kernels/NEActivationLayerKernel.cpp')
-rw-r--r--	src/core/NEON/kernels/NEActivationLayerKernel.cpp	28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 9670b7798c..37d694d1b4 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -109,8 +109,9 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr));
- ARM_COMPUTE_ERROR_ON_MSG((input->info()->data_type() == DataType::QASYMM8) && (activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
- "For QASYMM8 only lower/upper bounded relu is supported");
+ ARM_COMPUTE_ERROR_ON_MSG((input->info()->data_type() == DataType::QASYMM8) && (activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ && (activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU),
+ "For QASYMM8 only relu and lower/upper bounded relu are supported");
// Activation functions : FP32
static std::map<ActivationFunction, ActivationFunctionExecutorPtr> act_map_f32 =
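
With the check relaxed, plain RELU is now accepted for QASYMM8 inputs alongside the bounded variant. A minimal usage sketch, not part of this patch: the class and type names are the library's public API, but the shape, scale and offset values are illustrative and error handling is omitted.

    #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor input, output;
        // Illustrative 1-D QASYMM8 tensors; the scale/offset values are arbitrary
        input.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10)));
        output.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::QASYMM8, QuantizationInfo(0.05f, 10)));

        NEActivationLayer act;
        // Plain RELU on QASYMM8 passes validation after this change
        act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        input.allocator()->allocate();
        output.allocator()->allocate();
        // ... fill the input buffer, then:
        act.run();
        return 0;
    }
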
@@ -179,6 +180,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
static std::map<ActivationFunction, ActivationFunctionExecutorPtr> act_map_qasymm8 =
{
{ ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LU_BOUNDED_RELU, qasymm8_t> },
+ { ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, qasymm8_t> },
};
switch(input->info()->data_type())
@@ -561,12 +563,14 @@ typename std::enable_if<std::is_same<T, int8_t>::value, void>::type NEActivation
template <ActivationLayerInfo::ActivationFunction F, typename T>
typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type NEActivationLayerKernel::activation(const Window &window)
{
- Iterator input(_input, window);
- Iterator output(_output, window);
- const QuantizationInfo qi_in = _input->info()->quantization_info();
- const QuantizationInfo qi_out = _output->info()->quantization_info();
- const qasymm8x16_t a = vdupq_n_u8(sqcvt_qasymm8_f32(_act_info.a(), qi_in.scale, qi_in.offset));
- const qasymm8x16_t b = vdupq_n_u8(sqcvt_qasymm8_f32(_act_info.b(), qi_in.scale, qi_in.offset));
+ Iterator input(_input, window);
+ Iterator output(_output, window);
+ const QuantizationInfo qi_in = _input->info()->quantization_info();
+ const QuantizationInfo qi_out = _output->info()->quantization_info();
+ const qasymm8x16_t a = vdupq_n_u8(sqcvt_qasymm8_f32(_act_info.a(), qi_in.scale, qi_in.offset));
+ const qasymm8x16_t b = vdupq_n_u8(sqcvt_qasymm8_f32(_act_info.b(), qi_in.scale, qi_in.offset));
+ static const qasymm8x16_t CONST_0 = vdupq_n_u8(sqcvt_qasymm8_f32(0.f, qi_in.scale, qi_in.offset));
+
// Initialise scale/offset for re-quantization
float s = qi_in.scale / qi_out.scale;
float o = -qi_in.offset * s + qi_out.offset;
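
The scale s and offset o fold dequantization from the input space and requantization into the output space into a single affine transform: with real = scale_in * (q_in - offset_in) and q_out = real / scale_out + offset_out, substitution gives q_out = q_in * s + o, where s = scale_in / scale_out and o = -offset_in * s + offset_out. A scalar sketch of that arithmetic, illustrative only (it ignores the fixed-point rounding performed by vmlaq_qasymm8):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Scalar model of the per-element re-quantization set up above (names are illustrative)
    uint8_t requantize(uint8_t q_in, float scale_in, int offset_in, float scale_out, int offset_out)
    {
        const float s = scale_in / scale_out;
        const float o = -offset_in * s + offset_out;
        const float q = q_in * s + o;
        // Round to nearest and saturate to the unsigned 8-bit range
        return static_cast<uint8_t>(std::min(255.f, std::max(0.f, std::round(q))));
    }
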
@@ -589,6 +593,12 @@ typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type NEActivat
// Re-quantize to new output space
tmp = vmlaq_qasymm8(tmp, vs, vo);
break;
+ case ActivationFunction::RELU:
+ // Perform activation
+ tmp = vmaxq_u8(CONST_0, in);
+ // Re-quantize to new output space
+ tmp = vmlaq_qasymm8(tmp, vs, vo);
+ break;
default:
ARM_COMPUTE_ERROR("Function not implemented");
break;
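
For reference, the new RELU case amounts to clamping each lane against the quantized representation of 0.f in the input space and then re-quantizing into the output space. A scalar equivalent, illustrative only and under the same assumptions as the sketch above (the kernel itself processes 16 lanes at a time via vmaxq_u8 and vmlaq_qasymm8):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Scalar model of the QASYMM8 RELU case added above (illustrative only)
    uint8_t qasymm8_relu(uint8_t q_in, float scale_in, int offset_in, float scale_out, int offset_out)
    {
        // Quantized value of 0.f in the input space; CONST_0 holds 16 copies of this
        const uint8_t zero_q  = static_cast<uint8_t>(std::min(255L, std::max(0L, std::lround(0.f / scale_in) + offset_in)));
        const uint8_t clamped = std::max(zero_q, q_in);        // tmp = vmaxq_u8(CONST_0, in)
        const float   s       = scale_in / scale_out;          // vs
        const float   o       = -offset_in * s + offset_out;   // vo
        const float   q_out   = clamped * s + o;               // tmp = vmlaq_qasymm8(tmp, vs, vo)
        return static_cast<uint8_t>(std::min(255.f, std::max(0.f, std::round(q_out))));
    }
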