author     Georgios Pinitas <georgios.pinitas@arm.com>   2017-06-22 18:13:55 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-09-17 14:16:42 +0100
commit     00394ae1015c1eaa73f4d98fad31b7771063cd3a (patch)
tree       673634921bc8e9d5781787f2a46fdbc9aa1b0dd8
parent     b797fa235f714440ffa7a2ad4eef7ae14ee45da4 (diff)
download   ComputeLibrary-00394ae1015c1eaa73f4d98fad31b7771063cd3a.tar.gz
COMPMID-406: Port CLActivationLayer to use QS8/QS16.
Change-Id: Ia4114984c38e1d2027ad97335b3c6c11f5754e23
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78727
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
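A minimal host-side sketch of how the newly supported fixed point path can be driven, assuming the 2017-era CLTensor/TensorInfo/CLScheduler API that this patch targets (the shape, the activation and the fixed point position of 3 are arbitrary illustrative choices, not taken from the patch):

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // QS8 tensors with 3 fractional bits (fixed_point_position = 3)
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QS8, 3));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QS8, 3));

        // Configure a tanh-style activation: out = A * tanh(B * in)
        CLActivationLayer act;
        act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f));

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src with QS8 data ...

        act.run();
        CLScheduler::get().sync();
        return 0;
    }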
-rw-r--r--  arm_compute/core/CL/kernels/CLActivationLayerKernel.h      4
-rw-r--r--  arm_compute/core/NEON/NEFixedPoint.inl                     6
-rw-r--r--  arm_compute/runtime/CL/functions/CLActivationLayer.h       4
-rw-r--r--  src/core/CL/cl_kernels/activation_layer.cl                123
-rw-r--r--  src/core/CL/cl_kernels/fixed_point.h                      116
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp            41
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp                    69
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp                  32
-rw-r--r--  tests/validation/Reference.cpp                              9
9 files changed, 309 insertions(+), 95 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index df22574de8..a06f2fa0ae 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -51,8 +51,8 @@ public:
* @note If the output tensor is a nullptr, the activation function will be performed in-place
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
- * of the activation function. Data types supported: F16/F32.
- * @param[out] output Destination tensor. Data type should match the input data type.
+ * of the activation function. Data types supported: QS8/QS16/F16/F32.
+ * @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] act_info Activation layer information.
*/
void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
diff --git a/arm_compute/core/NEON/NEFixedPoint.inl b/arm_compute/core/NEON/NEFixedPoint.inl
index f62a338a61..4e862ba387 100644
--- a/arm_compute/core/NEON/NEFixedPoint.inl
+++ b/arm_compute/core/NEON/NEFixedPoint.inl
@@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <limits>
namespace arm_compute
{
@@ -1196,7 +1197,7 @@ inline qint8x16_t vqrecipq_qs8(qint8x16_t a, int fixed_point_position)
const qint8x16_t shift_value = vqnegq_s8(vqsubq_s8(vdupq_n_s8(8), vqaddq_s8(vclzq_s8(a), vdupq_n_s8(fixed_point_position))));
const qint8x16_t temp = vqshlq_s8(a, shift_value);
- qint8x16_t x = vqsubq_qs8(const_48_over_17, vmulq_qs8(temp, const_32_over_17, fixed_point_position));
+ qint8x16_t x = vqsubq_qs8(const_48_over_17, vqmulq_qs8(temp, const_32_over_17, fixed_point_position));
// Set initial guess to one if x > 1
uint8x16_t set_one = vcgtq_s8(x, const_one);
@@ -1234,7 +1235,8 @@ inline qint16x8_t vqrecipq_qs16(qint16x8_t a, int fixed_point_position)
x = vqaddq_s16(x, vqmulq_qs16(x, vqsubq_s16(const_one, vqmulq_qs16(temp, x, fixed_point_position)), fixed_point_position));
x = vqaddq_s16(x, vqmulq_qs16(x, vqsubq_s16(const_one, vqmulq_qs16(temp, x, fixed_point_position)), fixed_point_position));
- return vqshlq_s16(x, shift_value);
+ // Saturate result in case of overflow
+ return vbslq_s16(vceqq_s16(a, vdupq_n_s16(0)), vdupq_n_s16(std::numeric_limits<int16_t>::max()), vqshlq_s16(x, shift_value));
}
inline qint8x8_t vdiv_qs8(qint8x8_t a, qint8x8_t b, int fixed_point_position)
diff --git a/arm_compute/runtime/CL/functions/CLActivationLayer.h b/arm_compute/runtime/CL/functions/CLActivationLayer.h
index 3028afb25b..a1aeb193d1 100644
--- a/arm_compute/runtime/CL/functions/CLActivationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLActivationLayer.h
@@ -44,8 +44,8 @@ public:
* @note If the output tensor is a nullptr, the activation function will be performed in-place
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
- * of the activation function. Data types supported: F16/F32.
- * @param[out] output Destination tensor. Data type should match the input data type.
+ * of the activation function. Data types supported: QS8/QS16/F16/F32.
+ * @param[out] output Destination tensor. Data type supported: same as @p input
* @param[in] act_info Activation layer parameters.
*/
void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
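Since a nullptr output is documented above as a request for in-place execution, the same call collapses to a single tensor; reusing the objects from the sketch after the commit message (this relies on the assumption that the ActivationLayerInfo constructor defaults its a/b parameters):

    // In-place: src is overwritten with the activation result
    act.configure(&src, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));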
diff --git a/src/core/CL/cl_kernels/activation_layer.cl b/src/core/CL/cl_kernels/activation_layer.cl
index 721c43c017..5f812cf5b3 100644
--- a/src/core/CL/cl_kernels/activation_layer.cl
+++ b/src/core/CL/cl_kernels/activation_layer.cl
@@ -23,16 +23,99 @@
*/
#include "helpers.h"
+#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+
+#if defined(FIXED_POINT_POSITION)
+#include "fixed_point.h"
+
+#define CONST_ONE (1 << FIXED_POINT_POSITION)
+#define ABS_OP(a) ABS_SAT_OP_EXPAND((a), DATA_TYPE, VEC_SIZE)
+#define ADD_OP(a, b) ADD_SAT_OP_EXPAND((a), (b), DATA_TYPE, VEC_SIZE)
+#define SUB_OP(a, b) SUB_SAT_OP_EXPAND((a), (b), DATA_TYPE, VEC_SIZE)
+#define MUL_OP(a, b) MUL_SAT_OP_EXPAND((a), (b), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION)
+#define MLA_OP(a, b, c) MLA_SAT_OP_EXPAND((a), (b), (c), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION)
+#define DIV_OP(a, b) DIV_SAT_OP_EXPAND((a), (b), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION)
+#define EXP_OP(a) EXP_OP_EXPAND((a), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION)
+#define LOG_OP(a) LOG_OP_EXPAND((a), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION)
+#define SQRT_OP(a) DIV_OP(CONST_ONE, INVSQRT_OP_EXPAND((a), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION))
+#define TANH_OP(a) TANH_OP_EXPAND((a), DATA_TYPE, VEC_SIZE, FIXED_POINT_POSITION)
+
+#else /* FIXED_POINT_POSITION */
+
+#define CONST_ONE (1.f)
+#define ABS_OP(a) fabs((a))
+#define ADD_OP(a, b) ((a) + (b))
+#define SUB_OP(a, b) ((a) - (b))
+#define MUL_OP(a, b) ((a) * (b))
+#define MLA_OP(a, b, c) ((b) * (c) + (a))
+#define DIV_OP(a, b) ((a) / (b))
+#define EXP_OP(a) exp((a))
+#define LOG_OP(a) log((a))
+#define SQRT_OP(a) sqrt((a))
+#define TANH_OP(a) tanh((a))
+
+#endif /* FIXED_POINT_POSITION */
+
+// Logistic Activation
+inline TYPE logistic_op(TYPE x)
+{
+ return DIV_OP(CONST_ONE, ADD_OP(CONST_ONE, EXP_OP(-x)));
+}
+// Hyperbolic Tangent Activation
+inline TYPE tanh_op(TYPE x)
+{
+ return MUL_OP((TYPE)A_VAL, TANH_OP(MUL_OP((TYPE)B_VAL, x)));
+}
+// RELU Activation
+inline TYPE relu_op(TYPE x)
+{
+ return max(0, x);
+}
+// Bounded RELU Activation
+inline TYPE brelu_op(TYPE x)
+{
+ return min((TYPE)A_VAL, max(0, x));
+}
+// Soft RELU Activation
+inline TYPE srelu_op(TYPE x)
+{
+ return LOG_OP(ADD_OP(CONST_ONE, EXP_OP(x)));
+}
+// Absolute Activation
+inline TYPE abs_op(TYPE x)
+{
+ return ABS_OP(x);
+}
+// Square Activation
+inline TYPE square_op(TYPE x)
+{
+ return MUL_OP(x, x);
+}
+// Square-root Activation
+inline TYPE sqrt_op(TYPE x)
+{
+ return SQRT_OP(x);
+}
+// Linear Activation
+inline TYPE linear_op(TYPE x)
+{
+ return MLA_OP((TYPE)B_VAL, (TYPE)A_VAL, x);
+}
+
+#define ACTIVATION_OP2(op, x) op##_op(x)
+#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
+
/** This performs an activation function floating point inputs.
*
* @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note Activation function should be given as a preprocessor argument using -DNAME. e.g. -DTANH
- * @note Distinction between floating point and integer is done using -DTYPE_FP and -DTYPE_INT preprocessor argument
- * @note A, B variables required by some activation functions are set using -DA= and -DB= respectively.
+ * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH
+ * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
+ * @note In case of fixed point calculations the fixed point position is passed using -DFIXED_POINT_POSITION=position. e.g. -DFIXED_POINT_POSITION=3.
*
- * @param[in] input_ptr Pointer to the source image. Supported data types: F16, F32
+ * @param[in] input_ptr Pointer to the source image. Supported data types: QS8/QS16/F16/F32
* @param[in] input_stride_x Stride of the source image in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
@@ -40,7 +123,7 @@
* @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image
- * @param[out] output_ptr Pointer to the destination image. Supported data types: F16, F32
+ * @param[out] output_ptr Pointer to the destination image. Supported data types: same as @p input_ptr
* @param[in] output_stride_x Stride of the destination image in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the destination image in Y dimension (in bytes)
@@ -66,34 +149,12 @@ __kernel void activation_layer(
#endif /* IN_PLACE */
// Load data
- VEC_DATA_TYPE(DATA_TYPE, 16)
- data = vload16(0, (__global DATA_TYPE *)input.ptr);
+ TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
// Perform activation
-#ifdef LOGISTIC
- data = 1 / (1 + exp(-data));
-#elif defined(TANH)
- data = (VEC_DATA_TYPE(DATA_TYPE, 16))A * tanh((VEC_DATA_TYPE(DATA_TYPE, 16))B * data);
-#elif defined(RELU)
- data = max(0, data);
-#elif defined(BRELU)
- data = min((VEC_DATA_TYPE(DATA_TYPE, 16))A, max(0, data));
-#elif defined(SRELU)
- data = log(1 + exp(data));
-#elif defined(ABS)
-#ifdef TYPE_INT
- data = abs(data);
-#else /* TYPE_INT */
- data = fabs(data);
-#endif /* TYPE_INT */
-#elif defined(SQUARE)
- data = data * data;
-#elif defined(SQRT)
- data = sqrt(data);
-#elif defined(LINEAR)
- data = (VEC_DATA_TYPE(DATA_TYPE, 16))A * data + (VEC_DATA_TYPE(DATA_TYPE, 16))B;
-#endif /* switch TANH, RELU, BRELU, SRELU, ABS, SQUARE, SQRT, LINEAR */
+ data = ACTIVATION_OP(ACT, data);
// Store result
- vstore16(data, 0, (__global DATA_TYPE *)output.ptr);
+ VSTORE(VEC_SIZE)
+ (data, 0, (__global DATA_TYPE *)output.ptr);
}
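Putting the preprocessor contract documented above together: for, say, an in-place QS8 logistic activation at fixed point position 3, the define set the host would emit should look roughly like the line below (illustrative only; the exact strings are built in CLActivationLayerKernel::configure later in this patch, and A_VAL/B_VAL already carry the converted fixed point constants):

    -DACT=logistic -DDATA_TYPE=char -DVEC_SIZE=16 -DA_VAL=8 -DB_VAL=8 -DFIXED_POINT_POSITION=3 -DIN_PLACE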
diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h
index 5d340c4e95..bb534f5a51 100644
--- a/src/core/CL/cl_kernels/fixed_point.h
+++ b/src/core/CL/cl_kernels/fixed_point.h
@@ -99,6 +99,24 @@ TYPE_ALIAS(int, qs32)
#define CONVERT_SAT_STR(x, type) CONVERT_SAT_STR2(x, type, type##_TYPE)
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
+/** Computes saturating absolute value of fixed point vector.
+ *
+ * @param[in] type the actual data type.
+ *
+ * @return The result of the fixed point absolute value.
+ */
+#define ABSQ_SAT_IMPL(type) \
+ inline type abs_##type##_sat(type VopA) \
+ { \
+ return CONVERT_SAT(abs(VopA), type); \
+ }
+
+ABSQ_SAT_IMPL(qs8x16)
+ABSQ_SAT_IMPL(qs16x8)
+
+#define ABS_SAT_OP_EXPAND_STR(a, type, size) abs_##type##x##size##_sat((a))
+#define ABS_SAT_OP_EXPAND(a, type, size) ABS_SAT_OP_EXPAND_STR(a, type, size)
+
/** Computes max of fixed point types.
*
* @param[in] type the actual data type.
@@ -280,6 +298,7 @@ MLALQ_SAT_IMPL(qs16x8, qs32x8)
}
DIVQ_SAT_IMPL(qs8, qs8x16, qs16x16)
+DIVQ_SAT_IMPL(qs16, qs16x8, qs32x8)
DIVQ_SAT_IMPL(qs16, qs16x16, qs32x16)
#define DIV_SAT_OP_EXPAND_STR(a, b, type, size, position) div_sat_##type##x##size((a), (b), (position))
@@ -287,34 +306,37 @@ DIVQ_SAT_IMPL(qs16, qs16x16, qs32x16)
/** Saturate exponential of a fixed point vector
*
+ * @note Implemented approach uses a Taylor polynomial to approximate the exponential function.
+ *
* @param[in] stype the actual scalar data type.
* @param[in] type the actual data type.
* @param[in] size the number of the calculated elements.
*
* @return The result of the fixed point exponential. The result is saturated in case of overflow
*/
-#define EXPQ_IMPL(stype, type, size) \
- inline type exp_sat_##type(type VopA, int fixed_point_position) \
- { \
- type const_one = (type)(1 << (fixed_point_position)); \
- type ln2 = (type)((((0x58B9 >> (14 - fixed_point_position))) + 1) >> 1); \
- type inv_ln2 = (type)((((0x38AA >> (14 - fixed_point_position)) + 1) >> 1)) | const_one; \
- type A = (type)(((0x7FBA >> (14 - fixed_point_position)) + 1) >> 1); \
- type B = (type)(((0x3FE9 >> (14 - fixed_point_position)) + 1) >> 1); \
- type C = (type)(((0x1693 >> (14 - fixed_point_position)) + 1) >> 1); \
- type D = (type)(((0x0592 >> (14 - fixed_point_position)) + 1) >> 1); \
- type m = MUL_SAT_OP_EXPAND(VopA, inv_ln2, stype, size, fixed_point_position); \
- type dec_m = m >> (type)fixed_point_position; \
- type alpha = MUL_SAT_OP_EXPAND(dec_m << (type)fixed_point_position, ln2, stype, size, fixed_point_position); \
- alpha = CONVERT(abs_diff(VopA, alpha), type); \
- type sum = add_sat(MUL_SAT_OP_EXPAND(alpha, D, stype, size, fixed_point_position), C); \
- sum = add_sat(MUL_SAT_OP_EXPAND(alpha, sum, stype, size, fixed_point_position), B); \
- sum = add_sat(MUL_SAT_OP_EXPAND(alpha, sum, stype, size, fixed_point_position), A); \
- sum = add_sat(MUL_SAT_OP_EXPAND(alpha, sum, stype, size, fixed_point_position), const_one); \
- return select(select(sum << dec_m, sum >> -dec_m, dec_m < (type)0), (type)stype##_MAX, clz(sum) <= dec_m); \
+#define EXPQ_IMPL(stype, type, size) \
+ inline type exp_sat_##type(type VopA, int fixed_point_position) \
+ { \
+ type const_one = (type)(1 << (fixed_point_position)); \
+ type ln2 = (type)((((0x58B9 >> (14 - fixed_point_position))) + 1) >> 1); \
+ type inv_ln2 = (type)((((0x38AA >> (14 - fixed_point_position)) + 1) >> 1)) | const_one; \
+ type A = (type)(((0x7FBA >> (14 - fixed_point_position)) + 1) >> 1); \
+ type B = (type)(((0x3FE9 >> (14 - fixed_point_position)) + 1) >> 1); \
+ type C = (type)(((0x1693 >> (14 - fixed_point_position)) + 1) >> 1); \
+ type D = (type)(((0x0592 >> (14 - fixed_point_position)) + 1) >> 1); \
+ type m = MUL_SAT_OP_EXPAND(VopA, inv_ln2, stype, size, fixed_point_position); \
+ type dec_m = m >> (type)fixed_point_position; \
+ type alpha = MUL_SAT_OP_EXPAND(dec_m << (type)fixed_point_position, ln2, stype, size, fixed_point_position); \
+ alpha = CONVERT(abs_diff(VopA, alpha), type); \
+ type sum = add_sat(MUL_SAT_OP_EXPAND(alpha, D, stype, size, fixed_point_position), C); \
+ sum = add_sat(MUL_SAT_OP_EXPAND(alpha, sum, stype, size, fixed_point_position), B); \
+ sum = add_sat(MUL_SAT_OP_EXPAND(alpha, sum, stype, size, fixed_point_position), A); \
+ sum = add_sat(MUL_SAT_OP_EXPAND(alpha, sum, stype, size, fixed_point_position), const_one); \
+ return select((type)stype##_MAX, select(sum << dec_m, sum >> -dec_m, dec_m < (type)0), clz(sum) > dec_m); /* Saturate result if needed */ \
}
EXPQ_IMPL(qs8, qs8x16, 16)
+EXPQ_IMPL(qs16, qs16x8, 8)
EXPQ_IMPL(qs16, qs16x16, 16)
#define EXP_OP_EXPAND_STR(a, type, size, position) exp_sat_##type##x##size((a), (position))
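EXPQ_IMPL above is the classical range reduction scheme (a reading of the macro, not text from the patch):

    e^{x} = 2^{m} e^{\alpha}, \quad m = \lfloor x / \ln 2 \rfloor, \quad \alpha = x - m \ln 2 \in [0, \ln 2)
    e^{\alpha} \approx 1 + A\alpha + B\alpha^{2} + C\alpha^{3} + D\alpha^{4}

The polynomial is evaluated in Horner form on the small residual alpha, and the trailing select saturates to the type's maximum whenever the left shift by dec_m (the multiplication by 2^m) would overflow.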
@@ -322,6 +344,8 @@ EXPQ_IMPL(qs16, qs16x16, 16)
/** Saturate logarithm of a fixed point vector
*
+ * @note Implemented approach uses a Taylor polynomial to approximate the logarithm function.
+ *
* @param[in] stype the actual scalar data type.
* @param[in] type the actual data type.
* @param[in] size the number of the calculated elements.
@@ -332,11 +356,11 @@ EXPQ_IMPL(qs16, qs16x16, 16)
inline type log_sat_##type(type VopA, int fixed_point_position) \
{ \
type const_one = (type)(1 << (fixed_point_position)); \
- type ln2 = (type)(0x58B9 >> (15 - fixed_point_position)); \
- type A = (type)(0x5C0F >> (14 - fixed_point_position)); \
- type B = -(type)(0x56AE >> (15 - fixed_point_position)); \
- type C = (type)(0x2933 >> (15 - fixed_point_position)); \
- type D = -(type)(0x0AA7 >> (15 - fixed_point_position)); \
+ type ln2 = (type)(0x58B9 >> (15 - fixed_point_position)); /* 0.6931471 */ \
+ type A = (type)(0x5C0F >> (14 - fixed_point_position)); /* 1.4384189 */ \
+ type B = -(type)(0x56AE >> (15 - fixed_point_position)); /* -0.6771900 */ \
+ type C = (type)(0x2933 >> (15 - fixed_point_position)); /* 0.3218538 */ \
+ type D = -(type)(0x0AA7 >> (15 - fixed_point_position)); /* -0.0832229 */ \
type inter_a = select(VopA, DIV_SAT_OP_EXPAND(const_one, VopA, stype, size, fixed_point_position), VopA < const_one); \
type shift_val = (type)(15 - stype##_SHIFT) - clz(inter_a >> (type)fixed_point_position); \
inter_a = inter_a >> shift_val; \
@@ -346,16 +370,19 @@ EXPQ_IMPL(qs16, qs16x16, 16)
sum = add_sat(MUL_SAT_OP_EXPAND(inter_a, sum, stype, size, fixed_point_position), A); \
sum = MUL_SAT_OP_EXPAND(inter_a, sum, stype, size, fixed_point_position); \
sum = MUL_SAT_OP_EXPAND(add_sat(sum, shift_val << (type)fixed_point_position), ln2, stype, size, fixed_point_position); \
- return select(select(sum, -sum, VopA < const_one), (type)0, VopA < (type)0); \
+ return select(select(sum, -sum, VopA < const_one), (type)0, VopA < (type)0); /* Negate result for inputs below one, return 0 for negative inputs */ \
}
LOGQ_IMPL(qs8, qs8x16, 16)
+LOGQ_IMPL(qs16, qs16x8, 8)
#define LOG_OP_EXPAND_STR(a, type, size, position) log_sat_##type##x##size((a), (position))
#define LOG_OP_EXPAND(a, type, size, position) LOG_OP_EXPAND_STR(a, type, size, position)
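LOGQ_IMPL follows the same recipe for the logarithm (again a reading of the macro, not text from the patch): inputs below one are inverted first (and the result negated at the end), the remainder is normalised to x = 2^{s} m with m \in [1, 2), and

    \ln x = \ln 2 \, ( s + p(m - 1) ), \qquad p(t) \approx \log_2(1 + t) \approx A t + B t^{2} + C t^{3} + D t^{4}

which is why the Horner sum is multiplied by ln2 only after the shift amount s is added back in; negative inputs map to zero.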
/** Saturate inverse square root of a fixed point vector
*
+ * @note Implemented approach uses Newton's method to approximate the inverse square root function.
+ *
* @param[in] stype the actual scalar data type.
* @param[in] type the actual data type.
* @param[in] size the number of the calculated elements.
@@ -367,20 +394,53 @@ LOGQ_IMPL(qs8, qs8x16, 16)
{ \
type const_three = (type)(3 << (fixed_point_position)); \
type shift_value = (type)(16 - stype##_SHIFT) - (clz(VopA) + (type)fixed_point_position); \
- type temp = select(VopA >> shift_value, VopA << (-shift_value), shift_value < (type)0); \
+ type temp = select(VopA >> shift_value, select((type)stype##_MAX, VopA << (-shift_value), clz(VopA) > (-shift_value)), shift_value < (type)0); \
type x = temp; \
x = MUL_SAT_OP_EXPAND(x, sub_sat(const_three, MUL_SAT_OP_EXPAND(MUL_SAT_OP_EXPAND(x, x, stype, size, fixed_point_position), temp, stype, size, fixed_point_position)), stype, size, fixed_point_position) >> 1; \
x = MUL_SAT_OP_EXPAND(x, sub_sat(const_three, MUL_SAT_OP_EXPAND(MUL_SAT_OP_EXPAND(x, x, stype, size, fixed_point_position), temp, stype, size, fixed_point_position)), stype, size, fixed_point_position) >> 1; \
x = MUL_SAT_OP_EXPAND(x, sub_sat(const_three, MUL_SAT_OP_EXPAND(MUL_SAT_OP_EXPAND(x, x, stype, size, fixed_point_position), temp, stype, size, fixed_point_position)), stype, size, fixed_point_position) >> 1; \
- type res = select(x >> (shift_value >> 1), x << ((-shift_value) >> 1), shift_value < (type)0); \
- return select(res, stype##_MAX, res < (type)0); \
+ if(sizeof((stype)(1)) > 1) /* Perform more iterations if datatype is QS16 */ \
+ { \
+ x = MUL_SAT_OP_EXPAND(x, sub_sat(const_three, MUL_SAT_OP_EXPAND(MUL_SAT_OP_EXPAND(x, x, stype, size, fixed_point_position), temp, stype, size, fixed_point_position)), stype, size, fixed_point_position) >> 1; \
+ x = MUL_SAT_OP_EXPAND(x, sub_sat(const_three, MUL_SAT_OP_EXPAND(MUL_SAT_OP_EXPAND(x, x, stype, size, fixed_point_position), temp, stype, size, fixed_point_position)), stype, size, fixed_point_position) >> 1; \
+ } \
+ type shift_value2 = select(shift_value >> 1, (-shift_value) >> 1, shift_value < (type)0); \
+ return select(x >> shift_value2, select((type)stype##_MAX, x << shift_value2, clz(x) > shift_value2), shift_value < (type)0); /* Saturate result if needed */ \
}
INVSQRTQ_IMPL(qs8, qs8x16, 16)
+INVSQRTQ_IMPL(qs16, qs16x8, 8)
#define INVSQRT_OP_EXPAND_STR(a, type, size, position) invsqrt_sat_##type##x##size((a), (position))
#define INVSQRT_OP_EXPAND(a, type, size, position) INVSQRT_OP_EXPAND_STR(a, type, size, position)
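INVSQRTQ_IMPL is the standard Newton iteration for the reciprocal square root (a reading of the macro, not text from the patch):

    x_{n+1} = \tfrac{1}{2} x_{n} ( 3 - a x_{n}^{2} ) \longrightarrow a^{-1/2}

It runs three iterations for QS8 and, via the sizeof check, two extra iterations for QS16, with the pre- and post-shifts normalising the argument so the first guess is close enough to converge; the added clz-based selects clamp to the type's maximum when the re-scaling shift would overflow.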
+/** Saturate hyperbolic tangent of a fixed point vector
+ *
+ * tanh(x) = (e^2x - 1)/(e^2x + 1)
+ *
+ * @param[in] stype the actual scalar data type.
+ * @param[in] type the actual data type.
+ * @param[in] size the number of the calculated elements.
+ *
+ * @return The result of the fixed point hyperbolic tangent. The result is saturated in case of overflow
+ */
+#define TANHQ_IMPL(stype, type, size) \
+ inline type tanh_sat_##type(type VopA, int fixed_point_position) \
+ { \
+ type const_one = (type)(1 << (fixed_point_position)); \
+ type const_two = (type)(2 << (fixed_point_position)); \
+ type exp2x = EXP_OP_EXPAND(MUL_SAT_OP_EXPAND(const_two, VopA, stype, size, fixed_point_position), stype, size, fixed_point_position); \
+ type num = SUB_SAT_OP_EXPAND(exp2x, const_one, stype, size); \
+ type den = ADD_SAT_OP_EXPAND(exp2x, const_one, stype, size); \
+ return DIV_SAT_OP_EXPAND(num, den, stype, size, fixed_point_position); \
+ }
+
+TANHQ_IMPL(qs8, qs8x16, 16)
+TANHQ_IMPL(qs16, qs16x8, 8)
+
+#define TANH_OP_EXPAND_STR(a, type, size, position) tanh_sat_##type##x##size((a), (position))
+#define TANH_OP_EXPAND(a, type, size, position) TANH_OP_EXPAND_STR(a, type, size, position)
+
#define floatx16 float16
#define float16_TYPE float16
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index fda69b0b94..18202c1c5b 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -26,6 +26,7 @@
#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/TensorInfo.h"
@@ -33,6 +34,10 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "support/ToolchainSupport.h"
+
+#include <cmath>
+
using namespace arm_compute;
CLActivationLayerKernel::CLActivationLayerKernel()
@@ -42,7 +47,7 @@ CLActivationLayerKernel::CLActivationLayerKernel()
void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
if(output != nullptr)
{
@@ -54,20 +59,33 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
}
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
+ const int fixed_point_position = input->info()->fixed_point_position();
+ float a_const = act_info.a();
+ float b_const = act_info.b();
+ if(is_data_type_fixed_point(input->info()->data_type()))
+ {
+ a_const = static_cast<int>(lround(a_const * (1 << fixed_point_position)));
+ b_const = static_cast<int>(lround(b_const * (1 << fixed_point_position)));
+ }
+
// Set build options
std::set<std::string> build_opts;
- build_opts.insert(("-D" + string_from_activation_func(act_info.activation())));
- build_opts.insert(("-D" + ((is_data_type_float(input->info()->data_type())) ? std::string("TYPE_FP") : std::string("TYPE_INT"))));
- build_opts.insert(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
- build_opts.insert(("-DA=" + support::cpp11::to_string(act_info.a())));
- build_opts.insert(("-DB=" + support::cpp11::to_string(act_info.b())));
- build_opts.insert(output == nullptr ? "-DIN_PLACE" : "");
+ build_opts.emplace(("-DACT=" + lower_string(string_from_activation_func(act_info.activation()))));
+ build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+ build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ build_opts.emplace(("-DA_VAL=" + support::cpp11::to_string(a_const)));
+ build_opts.emplace(("-DB_VAL=" + support::cpp11::to_string(b_const)));
+ build_opts.emplace(output == nullptr ? "-DIN_PLACE" : "");
+ if(is_data_type_fixed_point(input->info()->data_type()))
+ {
+ build_opts.emplace(("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(fixed_point_position)));
+ }
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("activation_layer", build_opts));
// Make sure _kernel is initialized before calling the parent's configure
- constexpr unsigned int num_elems_processed_per_iteration = 16;
_input = input;
_output = output;
@@ -77,12 +95,9 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
if(output != nullptr)
{
+ AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-
- update_window_and_padding(win,
- AccessWindowHorizontal(input->info(), 0, num_elems_processed_per_iteration),
- output_access);
-
+ update_window_and_padding(win, input_access, output_access);
output_access.set_valid_region(win, input->info()->valid_region());
}
else
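A small standalone sketch of the arithmetic done in configure() above: how the real-valued A/B constants become fixed point integers and how the vector size is derived so that every work item always touches 16 bytes (the helper program and its values are hypothetical, not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // QS8 with 3 fractional bits, as exercised by the validation tests
        const int   fixed_point_position = 3;
        const float a                    = 0.5f;

        // Mirrors: a_const = static_cast<int>(lround(a_const * (1 << fixed_point_position)))
        const int a_fixed = static_cast<int>(std::lround(a * (1 << fixed_point_position)));
        std::printf("A_VAL = %d\n", a_fixed); // 0.5 * 2^3 = 4

        // Mirrors: num_elems_processed_per_iteration = 16 / element_size
        const int element_size = 1; // QS8 = 1 byte, QS16/F16 = 2, F32 = 4
        std::printf("VEC_SIZE = %d\n", 16 / element_size); // 16 for QS8, 8 for QS16/F16, 4 for F32
        return 0;
    }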
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 7286b93485..ac1da5c8b4 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -124,7 +124,14 @@ CLTensor compute_activation_layer(bool in_place, const TensorShape &shape, DataT
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
+ if(dt == DataType::QS8)
+ {
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
+ }
+ else
+ {
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int16_t>(act_info.activation(), fixed_point_position);
+ }
std::uniform_int_distribution<> distribution(min_bound, max_bound);
library->fill(CLAccessor(src), distribution, 0);
}
@@ -148,7 +155,7 @@ BOOST_AUTO_TEST_SUITE(CL)
BOOST_AUTO_TEST_SUITE(ActivationLayer)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, boost::unit_test::data::make({ false, true }) * (SmallShapes() + LargeShapes()) * CNNFloatDataTypes(), in_place, shape, dt)
+BOOST_DATA_TEST_CASE(Configuration, boost::unit_test::data::make({ false, true }) * (SmallShapes() + LargeShapes()) * CNNDataTypes(), in_place, shape, dt)
{
// Set fixed point position data type allowed
const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -182,7 +189,8 @@ BOOST_DATA_TEST_CASE(Configuration, boost::unit_test::data::make({ false, true }
}
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const int step = 16 / arm_compute::data_size_from_type(dt);
+ const PaddingSize padding = PaddingCalculator(shape.x(), step).required_padding();
validate(src.info()->padding(), padding);
if(!in_place)
@@ -193,10 +201,11 @@ BOOST_DATA_TEST_CASE(Configuration, boost::unit_test::data::make({ false, true }
BOOST_AUTO_TEST_SUITE(Float)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * CNNFloatDataTypes() * ActivationFunctions(), in_place, shape, dt, act_function)
+BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * CNNFloatDataTypes() * ActivationFunctions() * boost::unit_test::data::make({ 0.5f, 1.f }),
+ in_place, shape, dt, act_function, alpha_beta)
{
// Create activation layer info
- ActivationLayerInfo act_info(act_function, 1.f, 1.f);
+ ActivationLayerInfo act_info(act_function, alpha_beta, alpha_beta);
// Compute function
CLTensor dst = compute_activation_layer(in_place, shape, dt, act_info);
@@ -209,10 +218,11 @@ BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * S
}
BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, boost::unit_test::data::make({ false, true }) * LargeShapes() * CNNFloatDataTypes() * ActivationFunctions(), in_place, shape, dt, act_function)
+BOOST_DATA_TEST_CASE(RunLarge, boost::unit_test::data::make({ false, true }) * LargeShapes() * CNNFloatDataTypes() * ActivationFunctions() * boost::unit_test::data::make({ 0.5f, 1.f }),
+ in_place, shape, dt, act_function, alpha_beta)
{
// Create activation layer info
- ActivationLayerInfo act_info(act_function, 1.f, 1.f);
+ ActivationLayerInfo act_info(act_function, alpha_beta, alpha_beta);
// Compute function
CLTensor dst = compute_activation_layer(in_place, shape, dt, act_info);
@@ -225,6 +235,49 @@ BOOST_DATA_TEST_CASE(RunLarge, boost::unit_test::data::make({ false, true }) * L
}
BOOST_AUTO_TEST_SUITE_END()
+/** @note We test for fixed point precision [3,5] because [1,2] and [6,7] ranges
+ * cause overflow issues in most of the transcendental functions.
+ */
+BOOST_AUTO_TEST_SUITE(Quantized)
+BOOST_AUTO_TEST_SUITE(QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * ActivationFunctions() * boost::unit_test::data::xrange(3, 6, 1) * boost::unit_test::data::make({ 0.5f, 1.f }),
+ in_place, shape, act_function, fixed_point_position, alpha_beta)
+{
+ // Create activation layer info
+ ActivationLayerInfo act_info(act_function, alpha_beta, alpha_beta);
+
+ // Compute function
+ CLTensor dst = compute_activation_layer(in_place, shape, DataType::QS8, act_info, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, DataType::QS8, act_info, fixed_point_position);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, activation_layer_tolerance(act_function, fixed_point_position));
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * ActivationFunctions() * boost::unit_test::data::xrange(3, 14, 1) * boost::unit_test::data::make({ 0.5f, 1.f }),
+ in_place, shape, act_function, fixed_point_position, alpha_beta)
+{
+ // Create activation layer info
+ ActivationLayerInfo act_info(act_function, alpha_beta, alpha_beta);
+
+ // Compute function
+ CLTensor dst = compute_activation_layer(in_place, shape, DataType::QS16, act_info, fixed_point_position);
+
+ // Compute reference
+ RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, DataType::QS16, act_info, fixed_point_position);
+
+ // Validate output
+ validate(CLAccessor(dst), ref_dst, activation_layer_tolerance(act_function, fixed_point_position));
+}
+BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
+
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
+#endif /* DOXYGEN_SKIP_THIS */
\ No newline at end of file
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 71dfcdc4e2..2b24fd5175 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -53,12 +53,13 @@ namespace
{
/** Define tolerance of the activation layer
*
+ * @param[in] dt The data type used.
* @param[in] activation The activation function used.
* @param[in] fixed_point_position Number of bits for the fractional part..
*
* @return Tolerance depending on the activation function.
*/
-float activation_layer_tolerance(ActivationLayerInfo::ActivationFunction activation, int fixed_point_position = 0)
+float activation_layer_tolerance(DataType dt, ActivationLayerInfo::ActivationFunction activation, int fixed_point_position = 0)
{
switch(activation)
{
@@ -66,7 +67,15 @@ float activation_layer_tolerance(ActivationLayerInfo::ActivationFunction activat
case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
case ActivationLayerInfo::ActivationFunction::SQRT:
case ActivationLayerInfo::ActivationFunction::TANH:
- return (fixed_point_position != 0) ? 5.f : 0.00001f;
+ switch(dt)
+ {
+ case DataType::QS8:
+ return 5.f;
+ case DataType::QS16:
+ return 11.f;
+ default:
+ return 0.00001f;
+ }
break;
default:
return 0.f;
@@ -124,7 +133,14 @@ Tensor compute_activation_layer(bool in_place, const TensorShape &shape, DataTyp
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
+ if(dt == DataType::QS8)
+ {
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
+ }
+ else
+ {
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int16_t>(act_info.activation(), fixed_point_position);
+ }
std::uniform_int_distribution<> distribution(min_bound, max_bound);
library->fill(NEAccessor(src), distribution, 0);
}
@@ -206,7 +222,7 @@ BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * S
RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, dt, act_info);
// Validate output
- validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(act_function));
+ validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(dt, act_function));
}
BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
@@ -223,7 +239,7 @@ BOOST_DATA_TEST_CASE(RunLarge, boost::unit_test::data::make({ false, true }) * L
RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, dt, act_info);
// Validate output
- validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(act_function));
+ validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(dt, act_function));
}
BOOST_AUTO_TEST_SUITE_END()
@@ -246,13 +262,13 @@ BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * S
RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, DataType::QS8, act_info, fixed_point_position);
// Validate output
- validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(act_function, fixed_point_position));
+ validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(DataType::QS8, act_function, fixed_point_position));
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(QS16)
BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * ActivationFunctions() * boost::unit_test::data::xrange(3, 6, 1) * boost::unit_test::data::make({ 0.5f, 1.f }),
+BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * SmallShapes() * ActivationFunctions() * boost::unit_test::data::xrange(3, 14, 1) * boost::unit_test::data::make({ 0.5f, 1.f }),
in_place, shape, act_function, fixed_point_position, alpha_beta)
{
// Create activation layer info
@@ -265,7 +281,7 @@ BOOST_DATA_TEST_CASE(RunSmall, boost::unit_test::data::make({ false, true }) * S
RawTensor ref_dst = Reference::compute_reference_activation_layer(shape, DataType::QS16, act_info, fixed_point_position);
// Validate output
- validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(act_function, fixed_point_position));
+ validate(NEAccessor(dst), ref_dst, activation_layer_tolerance(DataType::QS16, act_function, fixed_point_position));
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 0a57fc0ea5..1b941870ba 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -459,7 +459,14 @@ RawTensor Reference::compute_reference_activation_layer(const TensorShape &shape
{
int min_bound = 0;
int max_bound = 0;
- std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
+ if(dt == DataType::QS8)
+ {
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int8_t>(act_info.activation(), fixed_point_position);
+ }
+ else
+ {
+ std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<int16_t>(act_info.activation(), fixed_point_position);
+ }
std::uniform_int_distribution<> distribution(min_bound, max_bound);
library->fill(ref_src, distribution, 0);
}