about summary refs log tree commit diff
path: root/src/core
diff options
context:
space:
mode:
author    Giorgio Arena <giorgio.arena@arm.com>    2018-02-07 15:38:12 +0000
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:47:18 +0000
commit    1167487ea8e54a76d0a3625e0aa84e2ad9ffd317 (patch)
tree      287dbc45e895c6b637fecc692c04bd4ae59580ae /src/core
parent    4e1e7dcd581adecd5ad9c0f9503fc3c43f8222ef (diff)
download  ComputeLibrary-1167487ea8e54a76d0a3625e0aa84e2ad9ffd317.tar.gz
COMPMID-897 Merge batch normalization with bounded relu
Change-Id: I9a607fe620f795cdea1a99fdd3f5f8c2fc76f980
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/119234
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CL/cl_kernels/batchnormalization_layer.cl              | 21
-rw-r--r--  src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp          | 49
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs     | 22
-rw-r--r--  src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp | 20
4 files changed, 86 insertions(+), 26 deletions(-)
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl
index fbffefb3c0..5ddeb1a6a1 100644
--- a/src/core/CL/cl_kernels/batchnormalization_layer.cl
+++ b/src/core/CL/cl_kernels/batchnormalization_layer.cl
@@ -23,6 +23,8 @@
*/
#include "helpers.h"
+#if defined(VEC_SIZE) && defined(DATA_TYPE)
+
#if defined(FIXED_POINT_POSITION)
#include "fixed_point.h"
@@ -42,6 +44,16 @@
#endif /* FIXED_POINT_POSITION */
+#if defined(LU_BRELU)
+#define ACTIVATION_FUNC(x) CLAMP(x, (DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL)
+#elif defined(BRELU)
+#define ACTIVATION_FUNC(x) CLAMP(x, (DATA_TYPE)0, (DATA_TYPE)A_VAL)
+#elif defined(RELU)
+#define ACTIVATION_FUNC(x) max(x, (DATA_TYPE)0)
+#else /* FUSED_ACT */
+#define ACTIVATION_FUNC(x) (x)
+#endif /* FUSED_ACT */
+
/** Apply batch normalization.
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: QS8/QS16/F16/F32
@@ -126,6 +138,13 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res = ADD_OP(MUL_OP(gamma_vec, x_bar), beta_vec);
+
+ res = ACTIVATION_FUNC(res);
+
VSTORE(VEC_SIZE)
- (ADD_OP(MUL_OP(gamma_vec, x_bar), beta_vec), 0, (__global DATA_TYPE *)out.ptr);
+ (res, 0, (__global DATA_TYPE *)out.ptr);
}
+
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */ \ No newline at end of file
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 663b044b5d..95487a23db 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
const ITensorInfo *mean, const ITensorInfo *var,
const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon)
+ float epsilon, ActivationLayerInfo act_info)
{
ARM_COMPUTE_UNUSED(epsilon);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
@@ -50,6 +50,14 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
+ if(act_info.enabled())
+ {
+ ActivationLayerInfo::ActivationFunction act = act_info.activation();
+ ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32 && input->data_type() != DataType::F16);
+ ARM_COMPUTE_RETURN_ERROR_ON(act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU && act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
+ && act != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+ ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
+ }
if(output != nullptr && output->total_size() != 0)
{
@@ -98,7 +106,7 @@ CLBatchNormalizationLayerKernel::CLBatchNormalizationLayerKernel()
}
void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
- float epsilon)
+ float epsilon, ActivationLayerInfo act_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
@@ -118,22 +126,22 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
}
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr,
- mean->info(), var->info(), beta->info(), gamma->info(), epsilon));
+ mean->info(), var->info(), beta->info(), gamma->info(), epsilon, act_info));
const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
// Set build options
- std::set<std::string> build_opts;
- build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
- build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
- build_opts.emplace(output == nullptr ? "-DIN_PLACE" : "");
- if(is_data_type_fixed_point(input->info()->data_type()))
- {
- build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
- }
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option_if(act_info.enabled(), "-D" + string_from_activation_func(act_info.activation()));
+ build_opts.add_option_if(act_info.enabled(), "-DA_VAL=" + float_to_string_with_full_precision(act_info.a()));
+ build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
+ build_opts.add_option_if(output == nullptr, "-DIN_PLACE");
+ build_opts.add_option_if(is_data_type_fixed_point(input->info()->data_type()), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts.options()));
// Set kernel static arguments
unsigned int include_output = (output != nullptr) ? 1 : 0;
@@ -144,14 +152,23 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
auto win_config = validate_and_configure_window(input->info(), (output == nullptr) ? nullptr : output->info());
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure(win_config.second);
+
+ _config_id = "batch_normalization_layer_";
+ _config_id += string_from_data_type(input->info()->data_type());
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(1));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(2));
}
Status CLBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
const ITensorInfo *mean, const ITensorInfo *var,
const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon)
+ float epsilon, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon, act_info));
ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), (output == nullptr) ? nullptr : output->clone().get()).first);
return Status{};
@@ -182,7 +199,7 @@ void CLBatchNormalizationLayerKernel::run(const Window &window, cl::CommandQueue
{
add_3D_tensor_argument(idx, _output, slice);
}
- enqueue(queue, *this, slice);
+ enqueue(queue, *this, slice, _lws_hint);
}
while(window.slide_window_slice_3D(slice));
}
diff --git a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
index 53fb51557c..7629b255b7 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,6 +36,16 @@ precision mediump float;
#define INVSQRT_OP(a) inversesqrt((a))
#define SQCVT_SAT(a) (a)
+#if defined(LU_BRELU)
+#define ACTIVATION_FUNC(x) min(max(x, float(B_VAL)), float(A_VAL))
+#elif defined(BRELU)
+#define ACTIVATION_FUNC(x) min(max(x, float(0)), float(A_VAL))
+#elif defined(RELU)
+#define ACTIVATION_FUNC(x) max(x, float(0))
+#else /* defined(FUSED_ACT) */
+#define ACTIVATION_FUNC(x) (x)
+#endif /* defined(FUSED_ACT) */
+
/** Apply batch normalization.
*
* @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
@@ -102,7 +112,7 @@ void main(void)
gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * beta_attrs.stride_x));
beta_param = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
+ STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param)));
}
#elif defined(DATA_TYPE_FP16)
@@ -148,7 +158,7 @@ void main(void)
gamma_param = unpacked_s[3].x;
beta_param = unpacked_s[4].x;
- result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+ result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
}
@@ -163,7 +173,7 @@ void main(void)
gamma_param = unpacked_s[3].y;
beta_param = unpacked_s[4].y;
- result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+ result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
}
@@ -178,7 +188,7 @@ void main(void)
gamma_param = unpacked_s[3].z;
beta_param = unpacked_s[4].z;
- result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+ result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
}
@@ -193,7 +203,7 @@ void main(void)
gamma_param = unpacked_s[3].w;
beta_param = unpacked_s[4].w;
- result = ADD_OP(MUL_OP(gamma_param, x_bar), beta_param);
+ result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
}
diff --git a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
index dee2a5579b..a41b62fbab 100644
--- a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ GCBatchNormalizationLayerKernel::GCBatchNormalizationLayerKernel()
}
void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta, const IGCTensor *gamma,
- float epsilon)
+ float epsilon, ActivationLayerInfo act_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
@@ -54,7 +54,14 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
- ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
+ if(act_info.enabled())
+ {
+ ARM_COMPUTE_ERROR_ON(input->info()->data_type() != DataType::F32 && input->info()->data_type() != DataType::F16);
+ ARM_COMPUTE_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU
+ && act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
+ && act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+ ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a());
+ }
_input = input;
_output = output;
@@ -79,6 +86,13 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
build_opts.emplace(("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1)));
build_opts.emplace(("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1)));
+ if(act_info.enabled())
+ {
+ build_opts.emplace("#define " + string_from_activation_func(act_info.activation()));
+ build_opts.emplace("#define A_VAL " + float_to_string_with_full_precision(act_info.a()));
+ build_opts.emplace("#define B_VAL " + float_to_string_with_full_precision(act_info.b()));
+ }
+
// Create kernel
_kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts));