diff options
Diffstat (limited to 'src/core/CL/cl_kernels/batchnormalization_layer.cl')
-rw-r--r-- | src/core/CL/cl_kernels/batchnormalization_layer.cl | 24 |
1 file changed, 11 insertions, 13 deletions
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl index 0bb6cd7c5f..66d371c02f 100644 --- a/src/core/CL/cl_kernels/batchnormalization_layer.cl +++ b/src/core/CL/cl_kernels/batchnormalization_layer.cl @@ -29,19 +29,14 @@ #define INVSQRT_OP(a) rsqrt((a)) #define SQCVT_SAT(a) (a) -#if defined(VEC_SIZE) && defined(DATA_TYPE) - -#if defined(FUSED_ACTIVATION) -#define SELECT_TYPE VEC_DATA_TYPE(SELECT_DATA_TYPE, VEC_SIZE) -#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) -#include "activation_helpers.h" -#define ACTIVATION_FUNC(x) ACTIVATION_OP(FUSED_ACTIVATION, x) -#else /* defined(FUSED_ACTIVATION) */ -#define ACTIVATION_FUNC(x) (x) -#endif /* defined(FUSED_ACTIVATION) */ +#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE) +#include "activation_float_helpers.h" /** Apply batch normalization. * + * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu + * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively + * * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32 * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes) * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) @@ -144,7 +139,7 @@ __kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input), res = ADD_OP(res, beta_vec); #endif /* USE_DEFAULT_BETA */ - res = ACTIVATION_FUNC(res); + res = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, res, A_VAL, B_VAL); VSTORE(VEC_SIZE) (res, 0, (__global DATA_TYPE *)out.ptr); @@ -152,6 +147,9 @@ __kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input), /** Apply batch normalization on tensors with NHWC format. * + * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. 
-DACTIVATION_TYPE=relu + * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively + * + * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32 + * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes) + * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) @@ -254,12 +252,12 @@ __kernel void batchnormalization_layer_nhwc(TENSOR3D_DECLARATION(input), res = ADD_OP(res, beta_vec); #endif /* USE_DEFAULT_BETA */ - res = ACTIVATION_FUNC(res); + res = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, res, A_VAL, B_VAL); VSTORE(VEC_SIZE) (res, 0, (__global DATA_TYPE *)out.ptr); } -#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */ +#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE) */ #if defined(NUM_CHANNELS) && defined(DATA_TYPE) && defined(EPSILON) /** Fuse batchnorm parameters to convolution layer parameters |