From 1167487ea8e54a76d0a3625e0aa84e2ad9ffd317 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Wed, 7 Feb 2018 15:38:12 +0000
Subject: COMPMID-897 Merge batch normalization with bounded relu

Change-Id: I9a607fe620f795cdea1a99fdd3f5f8c2fc76f980
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/119234
Tested-by: Jenkins
Reviewed-by: Gian Marco Iodice
Reviewed-by: Georgios Pinitas
---
 .../cs_shaders/batchnormalization_layer.cs      | 22 ++++++++++++++++------
 .../kernels/GCBatchNormalizationLayerKernel.cpp | 20 +++++++++++++++++---
 2 files changed, 33 insertions(+), 9 deletions(-)

(limited to 'src/core/GLES_COMPUTE')

diff --git a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
index 53fb51557c..7629b255b7 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -36,6 +36,16 @@ precision mediump float;
 #define INVSQRT_OP(a) inversesqrt((a))
 #define SQCVT_SAT(a) (a)
 
+#if defined(LU_BRELU)
+#define ACTIVATION_FUNC(x) min(max(x, float(B_VAL)), float(A_VAL))
+#elif defined(BRELU)
+#define ACTIVATION_FUNC(x) min(max(x, float(0)), float(A_VAL))
+#elif defined(RELU)
+#define ACTIVATION_FUNC(x) max(x, float(0))
+#else /* defined(FUSED_ACT) */
+#define ACTIVATION_FUNC(x) (x)
+#endif /* defined(FUSED_ACT) */
+
 /** Apply batch normalization.
  *
  * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
@@ -102,7 +112,7 @@ void main(void)
     gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * beta_attrs.stride_x));
     beta_param  = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
 
-    STORE_CURRENT_ITEM(dst_ptr, dst_iter, ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
+    STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param)));
 }
 
 #elif defined(DATA_TYPE_FP16)
@@ -148,7 +158,7 @@ void main(void)
         gamma_param = unpacked_s[3].x;
         beta_param  = unpacked_s[4].x;
 
-        result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+        result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
 
         STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
     }
@@ -163,7 +173,7 @@ void main(void)
         gamma_param = unpacked_s[3].y;
         beta_param  = unpacked_s[4].y;
 
-        result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+        result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
 
         STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
     }
@@ -178,7 +188,7 @@ void main(void)
        gamma_param = unpacked_s[3].z;
        beta_param  = unpacked_s[4].z;
 
-        result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+        result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
 
         STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
     }
@@ -193,7 +203,7 @@ void main(void)
        gamma_param = unpacked_s[3].w;
        beta_param  = unpacked_s[4].w;
 
-        result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+        result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
 
         STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
     }
diff --git a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
index dee2a5579b..a41b62fbab 100644
--- a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -42,7 +42,7 @@ GCBatchNormalizationLayerKernel::GCBatchNormalizationLayerKernel()
 }
 
 void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta, const IGCTensor *gamma,
-                                                float epsilon)
+                                                float epsilon, ActivationLayerInfo act_info)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_NULLPTR(output);
@@ -54,7 +54,14 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
     ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
+    if(act_info.enabled())
+    {
+        ARM_COMPUTE_ERROR_ON(input->info()->data_type() != DataType::F32 && input->info()->data_type() != DataType::F16);
+        ARM_COMPUTE_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU
+                             && act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
+                             && act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+        ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a());
+    }
 
     _input  = input;
     _output = output;
@@ -79,6 +86,13 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
     build_opts.emplace(("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1)));
     build_opts.emplace(("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1)));
 
+    if(act_info.enabled())
+    {
+        build_opts.emplace("#define " + string_from_activation_func(act_info.activation()));
+        build_opts.emplace("#define A_VAL " + float_to_string_with_full_precision(act_info.a()));
+        build_opts.emplace("#define B_VAL " + float_to_string_with_full_precision(act_info.b()));
+    }
+
     // Create kernel
     _kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts));
 
--
cgit v1.2.1
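Note on the shader change: with one of the activation defines set, the shader applies the activation to the batch-normalized value just before the store. Below is a minimal host-side C++ sketch of the per-element math for the LU_BRELU case; it is not code from the patch, it assumes the usual batch-normalization definition of x_bar from the surrounding shader, and the function name and parameters are illustrative only (a and b stand in for the A_VAL and B_VAL build options).

#include <algorithm>
#include <cmath>

// Per-element reference of the fused computation when LU_BRELU is defined:
// normalize, scale and shift, then clamp to [b, a].
float fused_bn_lu_brelu(float x, float mean, float var, float epsilon,
                        float gamma, float beta, float a, float b)
{
    const float x_bar = (x - mean) / std::sqrt(var + epsilon); // normalization
    const float y     = gamma * x_bar + beta;                  // scale and shift
    return std::min(std::max(y, b), a);                        // ACTIVATION_FUNC
}

BRELU is the same clamp with the lower bound fixed at 0, RELU keeps only max(x, 0), and with no define set ACTIVATION_FUNC reduces to the identity, so the unfused path is unchanged.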
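Note on the API change: callers opt into the fusion through the new ActivationLayerInfo argument of configure(). A minimal sketch, assuming already-initialized GLES tensors and the usual Compute Library include paths; the tensor names, helper function, and epsilon value are placeholders, not part of the patch.

#include "arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"

using namespace arm_compute;

void configure_fused_bn(GCBatchNormalizationLayerKernel &kernel,
                        GCTensor &input, GCTensor &output, GCTensor &mean,
                        GCTensor &var, GCTensor &beta, GCTensor &gamma)
{
    // Clamp the normalized output to [0, 6]: for LU_BOUNDED_RELU, a() is the
    // upper bound and b() the lower bound; configure() errors out if b > a.
    const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.f, 0.f);

    kernel.configure(&input, &output, &mean, &var, &beta, &gamma, 0.001f, act_info);
}

Passing a disabled ActivationLayerInfo (enabled() == false) preserves the previous behaviour: no activation define is emitted into the build options, so the shader's ACTIVATION_FUNC falls back to the identity.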