From f55cca5f17da72004108f92047d3177b7cdb1a76 Mon Sep 17 00:00:00 2001
From: Pablo Marquez Tello
Date: Wed, 6 Apr 2022 14:31:25 +0100
Subject: Add LU_BOUNDED_RELU support for QSYMM16

Partially resolves MLCE-604

Change-Id: Id585ab19fe5cd8f61c07a0aae6faac6ba5545d6d
Signed-off-by: Pablo Marquez Tello
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7379
Tested-by: Arm Jenkins
Reviewed-by: Michalis Spyrou
Comments-Addressed: Arm Jenkins
---
 src/cpu/kernels/CpuActivationKernel.cpp          |  5 +++--
 .../kernels/activation/generic/neon/qsymm16.cpp  | 24 +++++++++++++++++++++-
 .../kernels/activation/generic/sve2/qsymm16.cpp  | 16 +++++++++++++++
 tests/validation/NEON/ActivationLayer.cpp        |  9 +++++---
 4 files changed, 48 insertions(+), 6 deletions(-)

diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index d5112b4ba9..74148071ae 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -109,11 +109,12 @@ static const std::array<ActivationLayerInfo::ActivationFunction, 7> qasymm8_acti
     ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
 };
 /* Supported activation in the 16-bit integer domain */
-static const std::array<ActivationLayerInfo::ActivationFunction, 3> qsymm16_activations =
+static const std::array<ActivationLayerInfo::ActivationFunction, 4> qsymm16_activations =
 {
     ActivationLayerInfo::ActivationFunction::LOGISTIC,
     ActivationLayerInfo::ActivationFunction::TANH,
-    ActivationLayerInfo::ActivationFunction::HARD_SWISH
+    ActivationLayerInfo::ActivationFunction::HARD_SWISH,
+    ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU
 };
 
 Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const ActivationLayerInfo &activation_info)
diff --git a/src/cpu/kernels/activation/generic/neon/qsymm16.cpp b/src/cpu/kernels/activation/generic/neon/qsymm16.cpp
index 865b9f114e..ba14745938 100644
--- a/src/cpu/kernels/activation/generic/neon/qsymm16.cpp
+++ b/src/cpu/kernels/activation/generic/neon/qsymm16.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -101,6 +101,22 @@ void neon_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
                 // Re-quantize to new output space
                 tmp = vquantize_int16(tmp_dep, qi_out.scale);
             }
+
+            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+            {
+                // De-quantize
+                const auto vin_deq = vdequantize_int16(vin, qi_in.scale);
+                // Perform activation
+                const float32x4x2_t tmp_dep =
+                {
+                    {
+                        wrapper::vmin(va_f32, wrapper::vmax(vb_f32, vin_deq.val[0])),
+                        wrapper::vmin(va_f32, wrapper::vmax(vb_f32, vin_deq.val[1]))
+                    }
+                };
+                // Re-quantize to new output space
+                tmp = vquantize_int16(tmp_dep, qi_out.scale);
+            }
             else
             {
                 ARM_COMPUTE_ERROR("Unsupported activation function");
@@ -125,6 +141,12 @@ void neon_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
                 tmp_f = a_f32 * std::tanh(b_f32 * tmp_f);
                 tmp   = quantize_qsymm16(tmp_f, qi_out);
             }
+            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+            {
+                float tmp_f = dequantize_qsymm16(in, qi_in.scale);
+                tmp_f       = std::min(a_f32, std::max(b_f32, tmp_f));
+                tmp         = quantize_qsymm16(tmp_f, qi_out);
+            }
             else
             {
                 ARM_COMPUTE_ERROR("Unsupported activation function");
diff --git a/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp b/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
index 41b5555448..ca6534604f 100644
--- a/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
+++ b/src/cpu/kernels/activation/generic/sve2/qsymm16.cpp
@@ -99,6 +99,22 @@ void sve2_qsymm16_activation(const ITensor *src, ITensor *dst, const ActivationL
                 // Re-quantize to new output space
                 tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
             }
+            else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+            {
+                // De-quantize
+                auto vin_deq = svdequantize_qsymm16_z(pg, vin, qi_in.scale);
+                // Perform activation
+                const svfloat32x2_t tmp_dep =
+                {
+                    { {
+                        svmin_f32_z(pg,va_f32, svmax_f32_z(pg,vb_f32, svget2_f32(vin_deq, 0))),
+                        svmin_f32_z(pg,va_f32, svmax_f32_z(pg,vb_f32, svget2_f32(vin_deq, 1))),
+                      }
+                    }
+                };
+                // Re-quantize to new output space
+                tmp = svquantize_qsymm16_z(pg, tmp_dep, qi_out.scale);
+            }
             else
             {
                 ARM_COMPUTE_ERROR("Unsupported activation function");
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 8d70ca5415..1f43de49d2 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -398,9 +398,12 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture, fram
 TEST_SUITE_END() // QASYMM8_SIGNED
 
 /** Input data sets. */
-const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LOGISTIC,
-                                                                                                       ActivationLayerInfo::ActivationFunction::TANH
-                                                                                                     });
+const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction",
+{
+    ActivationLayerInfo::ActivationFunction::LOGISTIC,
+    ActivationLayerInfo::ActivationFunction::TANH,
+    ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
+});
 
 const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), Int16QuantizedActivationFunctionsDataset),
                                                      framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
--
cgit v1.2.1
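
All of the LU_BOUNDED_RELU branches added above follow the same scheme: de-quantize the QSYMM16 value, clamp it to the [b, a] interval with min(a, max(b, x)), then re-quantize with the output scale. Below is a minimal standalone C++ sketch of that scalar path; the dequantize/quantize helpers are simplified local stand-ins (the library's versions take the tensor's quantization info rather than a bare scale), and the scale, bound and input values are made up for the example.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// QSYMM16 models a real value as q * scale, with q stored in an int16_t.
static float dequantize_qsymm16(int16_t q, float scale)
{
    return static_cast<float>(q) * scale;
}

static int16_t quantize_qsymm16(float v, float scale)
{
    // Round to nearest and clamp to the int16_t range; the library's exact
    // rounding policy may differ.
    const long q = std::lround(v / scale);
    return static_cast<int16_t>(std::min(32767L, std::max(-32768L, q)));
}

int main()
{
    // Example quantization scales and activation bounds; in the library these
    // come from the tensors' quantization info and the ActivationLayerInfo (a, b).
    const float scale_in  = 1.0f / 32768.0f;
    const float scale_out = 1.0f / 32768.0f;
    const float a         = 0.5f;  // upper bound
    const float b         = -0.5f; // lower bound

    const int16_t in[] = { -30000, -10000, 0, 10000, 30000 };
    for(int16_t q : in)
    {
        // De-quantize, clamp to [b, a], re-quantize: the same three steps the
        // patch performs per element (and per lane in the vector paths).
        const float   x = dequantize_qsymm16(q, scale_in);
        const float   y = std::min(a, std::max(b, x));
        const int16_t r = quantize_qsymm16(y, scale_out);
        std::cout << static_cast<int>(q) << " -> " << static_cast<int>(r) << '\n';
    }
    return 0;
}

The vector branches in the patch do the same clamp lane-wise, using wrapper::vmin/wrapper::vmax on NEON and svmin_f32_z/svmax_f32_z on SVE2, with vquantize_int16 and svquantize_qsymm16_z handling the re-quantization.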