author     Georgios Pinitas <georgios.pinitas@arm.com>  2017-07-12 16:12:12 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-09-17 14:16:42 +0100
commit     579c0498e161215be1a36080b0b454e5198a992a (patch)
tree       1ec07b602935e7261a8a7aea900dc925e9bc35a1
parent     81f0d15d6840a0ae8ef571114555a26da74c4a43 (diff)
COMPMID-417: Add Leaky RELU support for both NEON/CL.
-Adds parametrizable leaky relu (x>0) ? x : a*x.

Change-Id: Ief19a435b5832a30b56f4aaaf55125787addee94
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80575
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
-rw-r--r--  arm_compute/core/Types.h                           |  1
-rw-r--r--  src/core/CL/cl_kernels/activation_layer.cl         |  5
-rw-r--r--  src/core/NEON/kernels/NEActivationLayerKernel.cpp  | 72
-rw-r--r--  src/core/Utils.cpp                                  |  1
-rw-r--r--  tests/TypePrinter.h                                 |  9
-rw-r--r--  tests/dataset/ActivationFunctionDataset.h           |  5
-rw-r--r--  tests/validation/TensorOperations.h                 | 18
7 files changed, 77 insertions(+), 34 deletions(-)
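
The commit message summarizes the new activation as (x>0) ? x : a*x, with a supplied through ActivationLayerInfo. As a quick reference before the per-file changes, here is a minimal scalar C++ sketch of that definition; the function name and the small driver are illustrative, not part of the library:

```cpp
#include <iostream>

// Minimal scalar sketch of the parametrizable leaky ReLU added by this patch:
//   f(x) = x      if x > 0
//   f(x) = a * x  otherwise  (a is the activation's alpha parameter, A_VAL in the CL kernel)
template <typename T>
T leaky_relu_ref(T x, T a)
{
    return (x > T(0)) ? x : a * x;
}

int main()
{
    std::cout << leaky_relu_ref(2.0f, 0.1f) << "\n";  // 2
    std::cout << leaky_relu_ref(-4.0f, 0.1f) << "\n"; // -0.4
    return 0;
}
```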
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 2bd449c5c6..765cae4ad4 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -505,6 +505,7 @@ public:
TANH, /**< Hyperbolic tangent */
RELU, /**< Rectifier */
BOUNDED_RELU, /**< Bounded Rectifier */
+ LEAKY_RELU, /**< Leaky Rectifier */
SOFT_RELU, /**< Soft Rectifier */
ABS, /**< Absolute */
SQUARE, /**< Square */
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl
index 5f812cf5b3..9f958610d6 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/activation_layer.cl
@@ -76,6 +76,11 @@ inline TYPE brelu_op(TYPE x)
{
return min((TYPE)A_VAL, max(0, x));
}
+// Leaky RELU Activation
+inline TYPE lrelu_op(TYPE x)
+{
+ return select(MUL_OP((TYPE)A_VAL, x), x, x > (TYPE)0);
+}
// Soft RELU Activation
inline TYPE srelu_op(TYPE x)
{
diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index f530413453..70b7057fcd 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -73,6 +73,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, float> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, float> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, float> },
+ { ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LEAKY_RELU, float> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, float> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, float> },
{ ActivationFunction::SQUARE, &NEActivationLayerKernel::activation<ActivationFunction::SQUARE, float> },
@@ -86,6 +87,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, qint8_t> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, qint8_t> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, qint8_t> },
+ { ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LEAKY_RELU, qint8_t> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, qint8_t> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, qint8_t> },
{ ActivationFunction::SQUARE, &NEActivationLayerKernel::activation<ActivationFunction::SQUARE, qint8_t> },
@@ -99,6 +101,7 @@ void NEActivationLayerKernel::configure(ITensor *input, ITensor *output, Activat
{ ActivationFunction::LOGISTIC, &NEActivationLayerKernel::activation<ActivationFunction::LOGISTIC, qint16_t> },
{ ActivationFunction::RELU, &NEActivationLayerKernel::activation<ActivationFunction::RELU, qint16_t> },
{ ActivationFunction::BOUNDED_RELU, &NEActivationLayerKernel::activation<ActivationFunction::BOUNDED_RELU, qint16_t> },
+ { ActivationFunction::LEAKY_RELU, &NEActivationLayerKernel::activation<ActivationFunction::LEAKY_RELU, qint16_t> },
{ ActivationFunction::SOFT_RELU, &NEActivationLayerKernel::activation<ActivationFunction::SOFT_RELU, qint16_t> },
{ ActivationFunction::SQRT, &NEActivationLayerKernel::activation<ActivationFunction::SQRT, qint16_t> },
{ ActivationFunction::SQUARE, &NEActivationLayerKernel::activation<ActivationFunction::SQUARE, qint16_t> },
@@ -177,17 +180,6 @@ typename std::enable_if<std::is_same<T, float>::value, void>::type NEActivationL
}
};
break;
- case ActivationFunction::BOUNDED_RELU:
- tmp =
- {
- {
- vminq_f32(a, vmaxq_f32(CONST_0, in.val[0])),
- vminq_f32(a, vmaxq_f32(CONST_0, in.val[1])),
- vminq_f32(a, vmaxq_f32(CONST_0, in.val[2])),
- vminq_f32(a, vmaxq_f32(CONST_0, in.val[3])),
- }
- };
- break;
case ActivationFunction::LINEAR:
tmp =
{
@@ -221,6 +213,28 @@ typename std::enable_if<std::is_same<T, float>::value, void>::type NEActivationL
}
};
break;
+ case ActivationFunction::BOUNDED_RELU:
+ tmp =
+ {
+ {
+ vminq_f32(a, vmaxq_f32(CONST_0, in.val[0])),
+ vminq_f32(a, vmaxq_f32(CONST_0, in.val[1])),
+ vminq_f32(a, vmaxq_f32(CONST_0, in.val[2])),
+ vminq_f32(a, vmaxq_f32(CONST_0, in.val[3])),
+ }
+ };
+ break;
+ case ActivationFunction::LEAKY_RELU:
+ tmp =
+ {
+ {
+ vbslq_f32(vcgtq_f32(in.val[0], CONST_0), in.val[0], vmulq_f32(a, in.val[0])),
+ vbslq_f32(vcgtq_f32(in.val[1], CONST_0), in.val[1], vmulq_f32(a, in.val[1])),
+ vbslq_f32(vcgtq_f32(in.val[2], CONST_0), in.val[2], vmulq_f32(a, in.val[2])),
+ vbslq_f32(vcgtq_f32(in.val[3], CONST_0), in.val[3], vmulq_f32(a, in.val[3])),
+ }
+ };
+ break;
case ActivationFunction::SOFT_RELU:
tmp =
{
@@ -299,9 +313,6 @@ typename std::enable_if<std::is_same<T, int8_t>::value, void>::type NEActivation
case ActivationFunction::ABS:
tmp = vqabsq_qs8(in);
break;
- case ActivationFunction::BOUNDED_RELU:
- tmp = vminq_qs8(a, vmaxq_qs8(CONST_0, in));
- break;
case ActivationFunction::LINEAR:
tmp = vqmlaq_qs8(b, a, in, fixed_point_position);
break;
@@ -311,6 +322,12 @@ typename std::enable_if<std::is_same<T, int8_t>::value, void>::type NEActivation
case ActivationFunction::RELU:
tmp = vmaxq_qs8(CONST_0, in);
break;
+ case ActivationFunction::BOUNDED_RELU:
+ tmp = vminq_qs8(a, vmaxq_qs8(CONST_0, in));
+ break;
+ case ActivationFunction::LEAKY_RELU:
+ tmp = vbslq_s8(vcgtq_s8(in, CONST_0), in, vmulq_qs8(a, in, fixed_point_position));
+ break;
case ActivationFunction::SOFT_RELU:
tmp = vlogq_qs8(vqaddq_qs8(CONST_1, vqexpq_qs8(in, fixed_point_position)), fixed_point_position);
break;
@@ -363,15 +380,6 @@ typename std::enable_if<std::is_same<T, int16_t>::value, void>::type NEActivatio
}
};
break;
- case ActivationFunction::BOUNDED_RELU:
- tmp =
- {
- {
- vminq_qs16(a, vmaxq_qs16(CONST_0, in.val[0])),
- vminq_qs16(a, vmaxq_qs16(CONST_0, in.val[1])),
- }
- };
- break;
case ActivationFunction::LINEAR:
tmp =
{
@@ -399,6 +407,24 @@ typename std::enable_if<std::is_same<T, int16_t>::value, void>::type NEActivatio
}
};
break;
+ case ActivationFunction::BOUNDED_RELU:
+ tmp =
+ {
+ {
+ vminq_qs16(a, vmaxq_qs16(CONST_0, in.val[0])),
+ vminq_qs16(a, vmaxq_qs16(CONST_0, in.val[1])),
+ }
+ };
+ break;
+ case ActivationFunction::LEAKY_RELU:
+ tmp =
+ {
+ {
+ vbslq_s16(vcgtq_s16(in.val[0], CONST_0), in.val[0], vmulq_qs16(a, in.val[0], fixed_point_position)),
+ vbslq_s16(vcgtq_s16(in.val[1], CONST_0), in.val[1], vmulq_qs16(a, in.val[1], fixed_point_position)),
+ }
+ };
+ break;
case ActivationFunction::SOFT_RELU:
tmp =
{
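
The float path above is branchless: vcgtq_f32 produces a per-lane all-ones mask where x > 0, and vbslq_f32 bit-selects between x and a*x for each lane. A standalone sketch of that pattern, assuming an AArch64/NEON target (the helper name and driver are illustrative):

```cpp
#include <arm_neon.h>
#include <cstdio>

// Branchless leaky ReLU on four floats, mirroring the vcgtq/vbslq pattern in the
// NEON kernel above: mask = (x > 0), result = mask ? x : a * x, per lane.
static float32x4_t leaky_relu_f32x4(float32x4_t x, float32x4_t a)
{
    const float32x4_t zero = vdupq_n_f32(0.0f);
    const uint32x4_t  gt0  = vcgtq_f32(x, zero);      // all-ones lanes where x > 0
    return vbslq_f32(gt0, x, vmulq_f32(a, x));        // select x or a * x
}

int main()
{
    const float in[4]  = { -2.0f, -0.5f, 0.5f, 3.0f };
    float       out[4] = {};
    const float32x4_t a = vdupq_n_f32(0.1f);          // illustrative alpha
    vst1q_f32(out, leaky_relu_f32x4(vld1q_f32(in), a));
    for (float v : out)
    {
        std::printf("%f\n", v);                       // -0.2, -0.05, 0.5, 3.0
    }
    return 0;
}
```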
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 11b41aa178..cf8e1940ec 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -156,6 +156,7 @@ const std::string &arm_compute::string_from_activation_func(ActivationLayerInfo:
{ ActivationLayerInfo::ActivationFunction::LOGISTIC, "LOGISTIC" },
{ ActivationLayerInfo::ActivationFunction::RELU, "RELU" },
{ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, "BRELU" },
+ { ActivationLayerInfo::ActivationFunction::LEAKY_RELU, "LRELU" },
{ ActivationLayerInfo::ActivationFunction::SOFT_RELU, "SRELU" },
{ ActivationLayerInfo::ActivationFunction::SQRT, "SQRT" },
{ ActivationLayerInfo::ActivationFunction::SQUARE, "SQUARE" },
diff --git a/tests/TypePrinter.h b/tests/TypePrinter.h
index ff9863e1fb..c4f3495761 100644
--- a/tests/TypePrinter.h
+++ b/tests/TypePrinter.h
@@ -197,9 +197,6 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo:
case ActivationLayerInfo::ActivationFunction::ABS:
os << "ABS";
break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- os << "BOUNDED_RELU";
- break;
case ActivationLayerInfo::ActivationFunction::LINEAR:
os << "LINEAR";
break;
@@ -209,6 +206,12 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ActivationLayerInfo:
case ActivationLayerInfo::ActivationFunction::RELU:
os << "RELU";
break;
+ case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+ os << "BOUNDED_RELU";
+ break;
+ case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+ os << "LEAKY_RELU";
+ break;
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
os << "SOFT_RELU";
break;
diff --git a/tests/dataset/ActivationFunctionDataset.h b/tests/dataset/ActivationFunctionDataset.h
index bc0e011bde..e6c196560b 100644
--- a/tests/dataset/ActivationFunctionDataset.h
+++ b/tests/dataset/ActivationFunctionDataset.h
@@ -40,17 +40,18 @@ namespace test
* Can be used as input for Boost data test cases to automatically run a test
* case on all activation functions.
*/
-class ActivationFunctions final : public GenericDataset<ActivationLayerInfo::ActivationFunction, 9>
+class ActivationFunctions final : public GenericDataset<ActivationLayerInfo::ActivationFunction, 10>
{
public:
ActivationFunctions()
: GenericDataset
{
ActivationLayerInfo::ActivationFunction::ABS,
- ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
ActivationLayerInfo::ActivationFunction::LINEAR,
ActivationLayerInfo::ActivationFunction::LOGISTIC,
ActivationLayerInfo::ActivationFunction::RELU,
+ ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+ ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
ActivationLayerInfo::ActivationFunction::SOFT_RELU,
ActivationLayerInfo::ActivationFunction::SQRT,
ActivationLayerInfo::ActivationFunction::SQUARE,
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index e2747249b4..27c50cf6d2 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -868,9 +868,6 @@ void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo a
case ActivationLayerInfo::ActivationFunction::ABS:
out[i] = std::abs(x);
break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- out[i] = std::min<T>(a, std::max<T>(0, x));
- break;
case ActivationLayerInfo::ActivationFunction::LINEAR:
out[i] = a * x + b;
break;
@@ -880,6 +877,12 @@ void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo a
case ActivationLayerInfo::ActivationFunction::RELU:
out[i] = std::max<T>(0, x);
break;
+ case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+ out[i] = std::min<T>(a, std::max<T>(0, x));
+ break;
+ case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+ out[i] = (x > 0) ? x : a * x;
+ break;
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
out[i] = std::log(static_cast<T>(1) + std::exp(x));
break;
@@ -919,9 +922,6 @@ void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo a
case ActivationLayerInfo::ActivationFunction::ABS:
out[i] = abs(x).raw();
break;
- case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
- out[i] = min(a, max(const_0, x)).raw();
- break;
case ActivationLayerInfo::ActivationFunction::LINEAR:
out[i] = add(b, mul(a, x)).raw();
break;
@@ -931,6 +931,12 @@ void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo a
case ActivationLayerInfo::ActivationFunction::RELU:
out[i] = max(const_0, x).raw();
break;
+ case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
+ out[i] = min(a, max(const_0, x)).raw();
+ break;
+ case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+ out[i] = (x > const_0) ? x.raw() : mul(a, x).raw();
+ break;
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
out[i] = log(const_1 + exp(x)).raw();
break;
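
In the fixed-point reference above, mul(a, x) is the library's Q-format multiply, so the negative branch implicitly rescales the product by fixed_point_position. A rough C++ sketch of what that branch computes for QS8, assuming a simple truncating rescale and saturation (the library's rounding behaviour may differ; all names here are illustrative):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Conceptual QS8 leaky ReLU: the negative branch multiplies two fixed-point values,
// so the widened product is shifted back by fixed_point_position and saturated to
// int8. This only illustrates the scaling; it is not the library's exact arithmetic.
static int8_t leaky_relu_qs8_ref(int8_t x, int8_t a, int fixed_point_position)
{
    if (x > 0)
    {
        return x;
    }
    const int32_t product = (static_cast<int32_t>(a) * static_cast<int32_t>(x)) >> fixed_point_position;
    return static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, product)));
}

int main()
{
    // With fixed_point_position = 5: a = 3 is ~0.094, x = -64 is -2.0 in Q2.5.
    const int    fp = 5;
    const int8_t a  = 3;
    const int8_t x  = -64;
    std::printf("%d\n", leaky_relu_qs8_ref(x, a, fp)); // -6, i.e. about -0.19
    return 0;
}
```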