author     Pablo Marquez Tello <pablo.tello@arm.com>  2022-04-06 14:31:25 +0100
committer  Pablo Marquez Tello <pablo.tello@arm.com>  2022-04-25 10:20:20 +0000
commit     f55cca5f17da72004108f92047d3177b7cdb1a76 (patch)
tree       08a1d83f51a017b03603f6c724f660fd1f084b2e
parent     fa6877f94b12ec80235e55bcfe5a9b6fdc009cf0 (diff)
Add LU_BOUNDED_RELU support for QSYMM16
Partially resolves MLCE-604

Change-Id: Id585ab19fe5cd8f61c07a0aae6faac6ba5545d6d
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7379
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
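For context, LU_BOUNDED_RELU is the two-sided clamp f(x) = min(a, max(b, x)). A minimal scalar sketch of what the new QSYMM16 path computes, assuming the usual symmetric 16-bit scheme where a real value x is stored as round(x / scale); the helper below is illustrative, not the library's own quantize/dequantize helpers:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical scalar reference: de-quantize, clamp to [b, a], re-quantize.
    int16_t lu_bounded_relu_qsymm16(int16_t in, float in_scale, float out_scale, float a, float b)
    {
        const float x = static_cast<float>(in) * in_scale;   // de-quantize
        const float y = std::min(a, std::max(b, x));         // clamp to [b, a]
        const float q = std::round(y / out_scale);           // re-quantize
        return static_cast<int16_t>(std::min(32767.f, std::max(-32768.f, q))); // saturate to int16
    }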
-rw-r--r--  src/cpu/kernels/CpuActivationKernel.cpp             |  5
-rw-r--r--  src/cpu/kernels/activation/generic/neon/qsymm16.cpp | 24
-rw-r--r--  src/cpu/kernels/activation/generic/sve2/qsymm16.cpp | 16
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp           |  9
4 files changed, 48 insertions(+), 6 deletions(-)
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index d5112b4ba9..74148071ae 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -109,11 +109,12 @@ static const std::array<ActivationLayerInfo::ActivationFunction, 7> qasymm8_acti
ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
};
/* Supported activation in the 16-bit integer domain */
-static const std::array<ActivationLayerInfo::ActivationFunction, 3> qsymm16_activations =
+static const std::array<ActivationLayerInfo::ActivationFunction, 4> qsymm16_activations =
{
ActivationLayerInfo::ActivationFunction::LOGISTIC,
ActivationLayerInfo::ActivationFunction::TANH,
- ActivationLayerInfo::ActivationFunction::HARD_SWISH
+ ActivationLayerInfo::ActivationFunction::HARD_SWISH,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
};
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &activation_info)
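With LU_BOUNDED_RELU added to qsymm16_activations, validate_arguments() now accepts the combination exercised below. A hedged configuration sketch of the runtime API (shape, scales, and the a/b bounds are placeholders, not taken from this patch):

    #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void example()
    {
        Tensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f)));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 32U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f)));

        // a = 6, b = 0: clamp to [0, 6], i.e. a ReLU6-style bound.
        NEActivationLayer act;
        act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 0.f));

        src.allocator()->allocate();
        dst.allocator()->allocate();
        act.run();
    }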
diff --git a/src/cpu/kernels/activation/generic/neon/qsymm16.cpp b/src/cpu/kernels/activation/generic/neon/qsymm16.cpp
index 865b9f114e..ba14745938 100644
--- a/src/cpu/kernels/activation/generic/neon/qsymm16.cpp
+++ b/src/cpu/kernels/activation/generic/neon/qsymm16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,6 +101,22 @@ void neon_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
// Re-quantize to new output space
tmp = vquantize_int16(tmp_dep, qi_out.scale);
}
+
+ else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ // De-quantize
+ const auto vin_deq = vdequantize_int16(vin, qi_in.scale);
+ // Perform activation
+ const float32x4x2_t tmp_dep =
+ {
+ {
+ wrapper::vmin(va_f32, wrapper::vmax(vb_f32, vin_deq.val[0])),
+ wrapper::vmin(va_f32, wrapper::vmax(vb_f32, vin_deq.val[1]))
+ }
+ };
+ // Re-quantize to new output space
+ tmp = vquantize_int16(tmp_dep, qi_out.scale);
+ }
else
{
ARM_COMPUTE_ERROR("Unsupported activation function");
@@ -125,6 +141,12 @@ void neon_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
tmp_f = a_f32 * std::tanh(b_f32 * tmp_f);
tmp = quantize_qsymm16(tmp_f, qi_out);
}
+ else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ float tmp_f = dequantize_qsymm16(in, qi_in.scale);
+ tmp_f = std::min<float>(a_f32, std::max<float>(b_f32, tmp_f));
+ tmp = quantize_qsymm16(tmp_f, qi_out);
+ }
else
{
ARM_COMPUTE_ERROR("Unsupported activation function");
diff --git a/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp b/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
index 41b5555448..ca6534604f 100644
--- a/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
+++ b/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
@@ -99,6 +99,22 @@ void sve2_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
// Re-quantize to new output space
tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
}
+ else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ // De-quantize
+ auto vin_deq = svdequantize_qsymm16_z(pg, vin, qi_in.scale);
+ // Perform activation
+ const svfloat32x2_t tmp_dep =
+ {
+ { {
+ svmin_f32_z(pg, va_f32, svmax_f32_z(pg, vb_f32, svget2_f32(vin_deq, 0))),
+ svmin_f32_z(pg, va_f32, svmax_f32_z(pg, vb_f32, svget2_f32(vin_deq, 1))),
+ }
+ }
+ };
+ // Re-quantize to new output space
+ tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
+ }
else
{
ARM_COMPUTE_ERROR("Unsupported activation function");
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 8d70ca5415..1f43de49d2 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -398,9 +398,12 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int8_t>, fram
TEST_SUITE_END() // QASYMM8_SIGNED
/** Input data sets. */
-const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LOGISTIC,
- ActivationLayerInfo::ActivationFunction::TANH
- });
+const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction",
+{
+ ActivationLayerInfo::ActivationFunction::LOGISTIC,
+ ActivationLayerInfo::ActivationFunction::TANH,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
+});
const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), Int16QuantizedActivationFunctionsDataset),
framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));