authorGeorgios Pinitas <georgios.pinitas@arm.com>2017-09-01 17:44:24 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:35:24 +0000
commit64ebe5b392b8135ec939b63596ffb8567a3e3248 (patch)
tree9291ce93dd474eee8d2d59b7b391e62b32e56cde
parenta09de0c8b2ed0f1481502d3b023375609362d9e3 (diff)
downloadComputeLibrary-64ebe5b392b8135ec939b63596ffb8567a3e3248.tar.gz
COMPMID-519: Add support for Lower and Upper Bounded RELU for CL/NEON
Change-Id: I7b16216ac59c899a33942bf17757b54535256d7a
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/86172
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
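
The new mode clamps its input to the [b, a] interval: b (beta) is the lower bound and a (alpha) the upper bound, so the existing BOUNDED_RELU behaviour is recovered with b = 0. A minimal scalar sketch of these semantics, matching the validation reference added in tests/validation/CPP/ActivationLayer.cpp below (the helper name is mine):

    #include <algorithm>

    // f(x) = min(a, max(b, x)): a is the upper bound (alpha), b the lower bound (beta).
    template <typename T>
    T lu_bounded_relu(T x, T a, T b)
    {
        return std::min<T>(a, std::max<T>(b, x));
    }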
-rw-r--r--  arm_compute/core/Types.h                           | 25
-rw-r--r--  src/core/CL/cl_kernels/activation_layer.cl         |  5
-rw-r--r--  src/core/NEON/kernels/NEActivationLayerKernel.cpp  | 36
-rw-r--r--  src/core/Utils.cpp                                 |  1
-rw-r--r--  tests/TypePrinter.h                                |  3
-rw-r--r--  tests/datasets/ActivationFunctionsDataset.h        |  1
-rw-r--r--  tests/validation/CPP/ActivationLayer.cpp           |  6
7 files changed, 65 insertions(+), 12 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 5eaaee6b7b..b90798e5ff 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -509,24 +509,25 @@ public:
/** Available activation functions */
enum class ActivationFunction
{
- LOGISTIC, /**< Logistic */
- TANH, /**< Hyperbolic tangent */
- RELU, /**< Rectifier */
- BOUNDED_RELU, /**< Bounded Rectifier */
- LEAKY_RELU, /**< Leaky Rectifier */
- SOFT_RELU, /**< Soft Rectifier */
- ABS, /**< Absolute */
- SQUARE, /**< Square */
- SQRT, /**< Square root */
- LINEAR /**< Linear */
+ LOGISTIC, /**< Logistic ( \f$ f(x) = \frac{1}{1 + e^{-x}} \f$ ) */
+ TANH, /**< Hyperbolic tangent ( \f$ f(x) = a \cdot tanh(b \cdot x) \f$ ) */
+ RELU, /**< Rectifier ( \f$ f(x) = max(0,x) \f$ ) */
+ BOUNDED_RELU, /**< Upper Bounded Rectifier ( \f$ f(x) = min(a, max(0,x)) \f$ ) */
+ LU_BOUNDED_RELU, /**< Lower and Upper Bounded Rectifier ( \f$ f(x) = min(a, max(b,x)) \f$ ) */
+ LEAKY_RELU, /**< Leaky Rectifier ( \f$ f(x) = \begin{cases} a x & \text{if } x < 0 \\ x & \text{if } x \geq 0 \end{cases} \f$ ) */
+ SOFT_RELU, /**< Soft Rectifier ( \f$ f(x)= log(1+e^x) \f$ ) */
+ ABS, /**< Absolute ( \f$ f(x)= |x| \f$ ) */
+ SQUARE, /**< Square ( \f$ f(x)= x^2 \f$ )*/
+ SQRT, /**< Square root ( \f$ f(x) = \sqrt{x} \f$ )*/
+ LINEAR /**< Linear ( \f$ f(x)= ax + b \f$ ) */
};
/** Default Constructor
*
* @param[in] f The activation function to use.
* @param[in] a (Optional) The alpha parameter used by some activation functions
- * (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
- * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
+ * (@ref ActivationFunction::BOUNDED_RELU, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::LINEAR, @ref ActivationFunction::TANH).
+ * @param[in] b (Optional) The beta parameter used by some activation functions (@ref ActivationFunction::LINEAR, @ref ActivationFunction::LU_BOUNDED_RELU, @ref ActivationFunction::TANH).
*/
ActivationLayerInfo(ActivationFunction f, float a = 0.0f, float b = 0.0f)
: _act(f), _a(a), _b(b)
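
With both parameters wired through the constructor documented above, a lower- and upper-bounded rectifier is configured by passing alpha as the upper bound and beta as the lower bound. An illustrative snippet (not part of the patch), building a ReLU6-style clamp:

    #include "arm_compute/core/Types.h"

    // Clamp activations to [0, 6]: alpha = 6 (upper bound), beta = 0 (lower bound).
    const arm_compute::ActivationLayerInfo relu6_info(
        arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f);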
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl
index 119879afd5..4424a66b61 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/activation_layer.cl
@@ -76,6 +76,11 @@ inline TYPE brelu_op(TYPE x)
{
return min((TYPE)A_VAL, max(0, x));
}
+// Lower Upper Bounded RELU Activation
+inline TYPE lu_brelu_op(TYPE x)
+{
+ return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
+}
// Leaky RELU Activation
inline TYPE lrelu_op(TYPE x)
{
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 3195411e18..4ff26c0c67 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -73,6 +73,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, float> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, float> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, float> },
+ { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LU_BOUNDED_RELU, float> },
{ ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LEAKY_RELU, float> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, float> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, float> },
@@ -89,6 +90,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, float16_t> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, float16_t> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, float16_t> },
+ { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LU_BOUNDED_RELU, float16_t> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, float16_t> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, float16_t> },
{ ActivationFunction::SQUARE, &NEActivationLayerKernel::activation<ActivationFunction::SQUARE, float16_t> },
@@ -104,6 +106,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, qint8_t> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, qint8_t> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, qint8_t> },
+ { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LU_BOUNDED_RELU, qint8_t> },
{ ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LEAKY_RELU, qint8_t> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, qint8_t> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, qint8_t> },
@@ -118,6 +121,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, qint16_t> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, qint16_t> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, qint16_t> },
+ { ActivationFunction::LU_BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LU_BOUNDED_RELU, qint16_t> },
{ ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LEAKY_RELU, qint16_t> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, qint16_t> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, qint16_t> },
@@ -211,6 +215,15 @@ typename std::enable_if<std::is_same<T, float16_t>::value, void>::type NEActivat
}
};
break;
+ case ActivationFunction::LU_BOUNDED_RELU:
+ tmp =
+ {
+ {
+ vminq_f16(a, vmaxq_f16(b, in.val[0])),
+ vminq_f16(a, vmaxq_f16(b, in.val[1]))
+ }
+ };
+ break;
case ActivationFunction::LINEAR:
tmp =
{
@@ -370,6 +383,17 @@ typename std::enable_if<std::is_same<T, float>::value, void>::type NEActivationL
}
};
break;
+ case ActivationFunction::LU_BOUNDED_RELU:
+ tmp =
+ {
+ {
+ vminq_f32(a, vmaxq_f32(b, in.val[0])),
+ vminq_f32(a, vmaxq_f32(b, in.val[1])),
+ vminq_f32(a, vmaxq_f32(b, in.val[2])),
+ vminq_f32(a, vmaxq_f32(b, in.val[3])),
+ }
+ };
+ break;
case ActivationFunction::LEAKY_RELU:
tmp =
{
@@ -471,6 +495,9 @@ typename std::enable_if<std::is_same<T, int8_t>::value, void>::type NEActivation
case ActivationFunction::BOUNDED_RELU:
tmp = vminq_qs8(a, vmaxq_qs8(CONST_0, in));
break;
+ case ActivationFunction::LU_BOUNDED_RELU:
+ tmp = vminq_qs8(a, vmaxq_qs8(b, in));
+ break;
case ActivationFunction::LEAKY_RELU:
tmp = vbslq_s8(vcgtq_s8(in, CONST_0), in, vmulq_qs8(a, in, fixed_point_position));
break;
@@ -562,6 +589,15 @@ typename std::enable_if<std::is_same<T, qint16_t>::value, void>::type NEActivati
}
};
break;
+ case ActivationFunction::LU_BOUNDED_RELU:
+ tmp =
+ {
+ {
+ vminq_qs16(a, vmaxq_qs16(b, in.val[0])),
+ vminq_qs16(a, vmaxq_qs16(b, in.val[1])),
+ }
+ };
+ break;
case ActivationFunction::LEAKY_RELU:
tmp =
{
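
Each NEON specialisation above applies the same two-intrinsic pattern: an element-wise max against the broadcast lower bound, followed by a min against the broadcast upper bound. A standalone illustration of the F32 path (the function name and the broadcasting of the scalar bounds are mine):

    #include <arm_neon.h>

    // min(a, max(b, x)) on four floats at once, as in the F32 case above.
    inline float32x4_t lu_bounded_relu_f32(float32x4_t x, float upper, float lower)
    {
        const float32x4_t a = vdupq_n_f32(upper); // alpha: upper bound
        const float32x4_t b = vdupq_n_f32(lower); // beta: lower bound
        return vminq_f32(a, vmaxq_f32(b, x));
    }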
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index cf8e1940ec..66d3fe8f78 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -156,6 +156,7 @@ const std::string &arm_compute::string_from_activation_func(ActivationLayerInfo:
{ ActivationLayerInfo::ActivationFunction::LOGISTIC, "LOGISTIC" },
{ ActivationLayerInfo::ActivationFunction::RELU, "RELU" },
{ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, "BRELU" },
+ { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, "LU_BRELU" },
{ ActivationLayerInfo::ActivationFunction::LEAKY_RELU, "LRELU" },
{ ActivationLayerInfo::ActivationFunction::SOFT_RELU, "SRELU" },
{ ActivationLayerInfo::ActivationFunction::SQRT, "SQRT" },
diff --git a/tests/TypePrinter.h b/tests/TypePrinter.h
index c207c1d634..bbccaadc6d 100644
--- a/tests/TypePrinter.h
+++ b/tests/TypePrinter.h
@@ -254,6 +254,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo:
case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
os << "BOUNDED_RELU";
break;
+ case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
+ os << "LU_BOUNDED_RELU";
+ break;
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
os << "LEAKY_RELU";
break;
diff --git a/tests/datasets/ActivationFunctionsDataset.h b/tests/datasets/ActivationFunctionsDataset.h
index 3e4f408614..31323dc8be 100644
--- a/tests/datasets/ActivationFunctionsDataset.h
+++ b/tests/datasets/ActivationFunctionsDataset.h
@@ -46,6 +46,7 @@ public:
ActivationLayerInfo::ActivationFunction::LOGISTIC,
ActivationLayerInfo::ActivationFunction::RELU,
ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
ActivationLayerInfo::ActivationFunction::SOFT_RELU,
ActivationLayerInfo::ActivationFunction::SQRT,
diff --git a/tests/validation/CPP/ActivationLayer.cpp b/tests/validation/CPP/ActivationLayer.cpp
index fa393be5e1..8fcacca1e2 100644
--- a/tests/validation/CPP/ActivationLayer.cpp
+++ b/tests/validation/CPP/ActivationLayer.cpp
@@ -66,6 +66,9 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
dst[i] = std::min<T>(a, std::max(static_cast<T>(0), x));
break;
+ case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
+ dst[i] = std::min<T>(a, std::max<T>(b, x));
+ break;
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
dst[i] = (x > 0) ? x : a * x;
break;
@@ -125,6 +128,9 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
dst[i] = min(a, max(const_0, x)).raw();
break;
+ case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
+ dst[i] = min(a, max(b, x)).raw();
+ break;
case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
dst[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
break;
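
End to end, the new mode is exercised like any other activation. A minimal host-side sketch, assuming the existing NEON runtime wrapper NEActivationLayer and its usual configure()/run() interface (which this patch does not touch):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor src{}, dst{};
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::F32));

        // Lower and upper bounded RELU: clamp to [-1, 1] (alpha = 1, beta = -1).
        NEActivationLayer act{};
        act.configure(&src, &dst,
                      ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f));

        src.allocator()->allocate();
        dst.allocator()->allocate();
        // ... fill src with input data ...
        act.run();
        return 0;
    }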