From 64ebe5b392b8135ec939b63596ffb8567a3e3248 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 1 Sep 2017 17:44:24 +0100
Subject: COMPMID-519: Add support for Lower and Upper Bounded RELU for CL/NEON

Change-Id: I7b16216ac59c899a33942bf17757b54535256d7a
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86172
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 src/core/NEON/kernels/NEActivationLayerKernel.cpp | 36 +++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 3195411e18..4ff26c0c67 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -73,6 +73,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
         { ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation },
         { ActivationFunction::RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation },
+        { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SQRT, &NEActivationLayerKernel::activation },
@@ -89,6 +90,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
         { ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation },
         { ActivationFunction::RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation },
+        { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SQRT, &NEActivationLayerKernel::activation },
         { ActivationFunction::SQUARE, &NEActivationLayerKernel::activation },
@@ -104,6 +106,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
         { ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation },
         { ActivationFunction::RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation },
+        { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SQRT, &NEActivationLayerKernel::activation },
@@ -118,6 +121,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
         { ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation },
         { ActivationFunction::RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation },
+        { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation },
         { ActivationFunction::SQRT, &NEActivationLayerKernel::activation },
@@ -211,6 +215,15 @@ typename std::enable_if::value, void>::type NEActivat
                 }
             };
             break;
+        case ActivationFunction::LU_BOUNDED_RELU:
+            tmp =
+            {
+                {
+                    vminq_f16(a, vmaxq_f16(b, in.val[0])),
+                    vminq_f16(a, vmaxq_f16(b, in.val[1]))
+                }
+            };
+            break;
         case ActivationFunction::LINEAR:
             tmp =
             {
@@ -370,6 +383,17 @@ typename std::enable_if::value, void>::type NEActivationL
                 }
             };
             break;
+        case ActivationFunction::LU_BOUNDED_RELU:
+            tmp =
+            {
+                {
+                    vminq_f32(a, vmaxq_f32(b, in.val[0])),
+                    vminq_f32(a, vmaxq_f32(b, in.val[1])),
+                    vminq_f32(a, vmaxq_f32(b, in.val[2])),
+                    vminq_f32(a, vmaxq_f32(b, in.val[3])),
+                }
+            };
+            break;
         case ActivationFunction::LEAKY_RELU:
             tmp =
             {
@@ -471,6 +495,9 @@ typename std::enable_if::value, void>::type NEActivation
         case ActivationFunction::BOUNDED_RELU:
             tmp = vminq_qs8(a, vmaxq_qs8(CONST_0, in));
             break;
+        case ActivationFunction::LU_BOUNDED_RELU:
+            tmp = vminq_qs8(a, vmaxq_qs8(b, in));
+            break;
         case ActivationFunction::LEAKY_RELU:
             tmp = vbslq_s8(vcgtq_s8(in, CONST_0), in, vmulq_qs8(a, in, fixed_point_position));
             break;
@@ -562,6 +589,15 @@ typename std::enable_if::value, void>::type NEActivati
                 }
             };
             break;
+        case ActivationFunction::LU_BOUNDED_RELU:
+            tmp =
+            {
+                {
+                    vminq_qs16(a, vmaxq_qs16(b, in.val[0])),
+                    vminq_qs16(a, vmaxq_qs16(b, in.val[1])),
+                }
+            };
+            break;
         case ActivationFunction::LEAKY_RELU:
             tmp =
             {
--
cgit v1.2.1
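
A note on the semantics, for readers comparing the new cases against the plain BOUNDED_RELU ones above: LU_BOUNDED_RELU clamps each element into the closed range [b, a], i.e. f(x) = min(a, max(b, x)), where a is the upper bound and b the lower bound. BOUNDED_RELU is the special case b = 0, which is why its hunks pass CONST_0 where the new cases pass b. The standalone sketch below reproduces the same vminq_f32/vmaxq_f32 pairing the patch applies lane-by-lane to float32x4x4_t; it is not part of the patch, and the function names, buffer contents and the [-1, 6] bounds are illustrative only.

#include <arm_neon.h>
#include <cmath>
#include <cstdio>

// Scalar reference: clamp x into [b, a].
static float lu_bounded_relu_scalar(float x, float a, float b)
{
    return std::fmin(a, std::fmax(b, x));
}

// NEON sketch processing 4 floats per step; the patched kernel unrolls the
// same min/max pairing over the lanes of a float32x4x4_t. 'len' is assumed
// to be a multiple of 4 to keep the sketch short.
static void lu_bounded_relu_f32(const float *src, float *dst, int len, float a, float b)
{
    const float32x4_t va = vdupq_n_f32(a); // upper bound in every lane
    const float32x4_t vb = vdupq_n_f32(b); // lower bound in every lane
    for(int i = 0; i < len; i += 4)
    {
        const float32x4_t in  = vld1q_f32(src + i);
        const float32x4_t out = vminq_f32(va, vmaxq_f32(vb, in)); // min(a, max(b, x))
        vst1q_f32(dst + i, out);
    }
}

int main()
{
    const float src[8] = { -3.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f, 6.5f, 100.0f };
    float       dst[8];
    lu_bounded_relu_f32(src, dst, 8, 6.0f, -1.0f); // clamp into [-1, 6]
    for(int i = 0; i < 8; ++i)
    {
        std::printf("%6.1f -> %4.1f (scalar: %4.1f)\n", src[i], dst[i],
                    lu_bounded_relu_scalar(src[i], 6.0f, -1.0f));
    }
    return 0;
}

Built natively on AArch64 (or on ARMv7 with -mfpu=neon), this prints -3.0 and 100.0 clamped to -1.0 and 6.0 respectively, matching what the new kernel cases compute per lane.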