Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/cl_kernels/batchnormalization_layer.cl                  35
-rw-r--r--  src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp             53
-rw-r--r--  src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs       143
-rw-r--r--  src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp  171
-rw-r--r--  src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp          130
5 files changed, 381 insertions(+), 151 deletions(-)
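
For reference, every kernel touched below computes the standard batch normalization transform; the patch makes beta and gamma optional and specializes the kernels when they keep their default values. A sketch of the equation in conventional notation (not identifiers from the diff):

    % Batch normalization as computed by these kernels.
    % The patch elides the multiplication when gamma = 1 (USE_DEFAULT_GAMMA)
    % and the addition when beta = 0 (USE_DEFAULT_BETA).
    \[
    y \;=\; \gamma\,\hat{x} + \beta,
    \qquad
    \hat{x} \;=\; \frac{x - \mu}{\sqrt{\sigma^{2} + \epsilon}}
    \]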
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl
index 0b61b5638c..29b62d3d92 100644
--- a/src/core/CL/cl_kernels/batchnormalization_layer.cl
+++ b/src/core/CL/cl_kernels/batchnormalization_layer.cl
@@ -93,8 +93,12 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
#endif /* not IN_PLACE */
VECTOR_DECLARATION(mean),
VECTOR_DECLARATION(var),
+#ifndef USE_DEFAULT_BETA
VECTOR_DECLARATION(beta),
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
VECTOR_DECLARATION(gamma),
+#endif /* USE_DEFAULT_GAMMA */
float epsilon)
{
Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
@@ -103,10 +107,14 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
#else /* IN_PLACE */
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
- Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
- Vector var = CONVERT_TO_VECTOR_STRUCT(var);
- Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
+ Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
+ Vector var = CONVERT_TO_VECTOR_STRUCT(var);
+#ifndef USE_DEFAULT_BETA
+ Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
+#endif /* USE_DEFAULT_GAMMA */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
data = 0;
@@ -117,9 +125,7 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
x_bar = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- gamma_vec = 0;
- VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- beta_vec = 0;
+ res = 0;
const int current_slice = get_global_id(2);
@@ -132,11 +138,22 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
numerator = SUB_OP(data, numerator);
x_bar = MUL_OP(numerator, denominator);
+#ifndef USE_DEFAULT_GAMMA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
- beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
+ res = MUL_OP(gamma_vec, x_bar);
+#else /* USE_DEFAULT_GAMMA */
+ // gamma is equal to 1, no need to perform multiplications
+ res = x_bar;
+#endif /* USE_DEFAULT_GAMMA */
+
+#ifndef USE_DEFAULT_BETA
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
- res = ADD_OP(MUL_OP(gamma_vec, x_bar), beta_vec);
+ beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
+ // beta is not zero, hence we need to perform the addition
+ res = ADD_OP(res, beta_vec);
+#endif /* USE_DEFAULT_BETA */
res = ACTIVATION_FUNC(res);
@@ -144,4 +161,4 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
(res, 0, (__global DATA_TYPE *)out.ptr);
}
-#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */
\ No newline at end of file
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */
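
With both defines set, the kernel body above collapses to res = ACTIVATION_FUNC(x_bar). A minimal host-side sketch of how absent tensors map to the two flags (hypothetical helper, not the library's API; the real wiring via CLBuildOptions appears in the next file):

    #include <string>

    // Hypothetical sketch: translate absent beta/gamma tensors into the
    // preprocessor flags consumed by batchnormalization_layer.cl.
    std::string bn_build_options(bool has_beta, bool has_gamma)
    {
        std::string opts;
        if(!has_beta)
        {
            opts += " -DUSE_DEFAULT_BETA"; // beta == 0: skip the addition
        }
        if(!has_gamma)
        {
            opts += " -DUSE_DEFAULT_GAMMA"; // gamma == 1: skip the multiplication
        }
        return opts;
    }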
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 95c8250ee7..62f21eed96 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -46,9 +46,22 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
{
ARM_COMPUTE_UNUSED(epsilon);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+ if(beta != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, beta);
+ }
+ if(gamma != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
+ }
+
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
if(act_info.enabled())
{
@@ -108,7 +121,7 @@ CLBatchNormalizationLayerKernel::CLBatchNormalizationLayerKernel()
void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
float epsilon, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
_input = input;
_output = output;
@@ -120,15 +133,9 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
_run_in_place = (output == nullptr) || (output == input);
- if(output != nullptr)
- {
- ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), output->info());
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), *input->info()->clone());
- }
-
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr,
- mean->info(), var->info(), beta->info(), gamma->info(), epsilon, act_info));
+ mean->info(), var->info(), (beta != nullptr) ? beta->info() : nullptr,
+ (gamma != nullptr) ? gamma->info() : nullptr, epsilon, act_info));
const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
@@ -141,13 +148,23 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
build_opts.add_option_if(is_data_type_fixed_point(input->info()->data_type()), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
+ build_opts.add_option_if(beta == nullptr, "-DUSE_DEFAULT_BETA");
+ build_opts.add_option_if(gamma == nullptr, "-DUSE_DEFAULT_GAMMA");
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts.options()));
// Set kernel static arguments
unsigned int include_output = (!_run_in_place) ? 1 : 0;
- unsigned int idx = (1 + include_output) * num_arguments_per_3D_tensor() + 4 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
+ unsigned int idx = (1 + include_output) * num_arguments_per_3D_tensor() + 2 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
+ if(_beta != nullptr)
+ {
+ idx += num_arguments_per_1D_tensor(); // Skip beta parameter
+ }
+ if(_gamma != nullptr)
+ {
+ idx += num_arguments_per_1D_tensor(); // Skip gamma parameter
+ }
_kernel.setArg<cl_float>(idx++, _epsilon);
// Configure kernel window
@@ -191,8 +208,14 @@ void CLBatchNormalizationLayerKernel::run(const Window &window, cl::CommandQueue
unsigned int idx = (1 + include_output) * num_arguments_per_3D_tensor();
add_1D_tensor_argument(idx, _mean, vector_slice);
add_1D_tensor_argument(idx, _var, vector_slice);
- add_1D_tensor_argument(idx, _beta, vector_slice);
- add_1D_tensor_argument(idx, _gamma, vector_slice);
+ if(_beta != nullptr)
+ {
+ add_1D_tensor_argument(idx, _beta, vector_slice);
+ }
+ if(_gamma != nullptr)
+ {
+ add_1D_tensor_argument(idx, _gamma, vector_slice);
+ }
do
{
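
The index at which configure() sets the static epsilon argument depends on which optional vectors precede it. A standalone sketch of that arithmetic (function name and parameters are ours, mirroring the code above):

    // Kernel arguments are laid out as:
    //   input [, output], mean, var [, beta] [, gamma], epsilon
    // n3 and n1 stand in for num_arguments_per_3D_tensor() and
    // num_arguments_per_1D_tensor().
    unsigned int epsilon_arg_index(bool run_in_place, bool has_beta, bool has_gamma,
                                   unsigned int n3, unsigned int n1)
    {
        unsigned int idx = (run_in_place ? 1 : 2) * n3; // input (+ output)
        idx += 2 * n1;                                  // mean and var
        if(has_beta)
        {
            idx += n1; // skip beta
        }
        if(has_gamma)
        {
            idx += n1; // skip gamma
        }
        return idx; // epsilon is set at this index
    }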
diff --git a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
index 7629b255b7..81be9679b2 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
@@ -50,6 +50,8 @@ precision mediump float;
*
* @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
* @note Epsilon parameter in the batch normalization equation should be given as a preprocessor argument using "#define EPSILON". e.g. "#define EPSILON 0.1"
+ * @note Beta is optional, with a default value of 0. If beta is not provided, the preprocessor argument "USE_DEFAULT_BETA" should be given
+ * @note Gamma is optional, with a default value of 1. If gamma is not provided, the preprocessor argument "USE_DEFAULT_GAMMA" should be given
*
* @param[in] src_ptr Pointer to the first source tensor. Supported data types: F16/F32
* @param[in] src_attrs The attributes of the source tensor
@@ -59,10 +61,10 @@ precision mediump float;
* @param[in] mean_attrs The attributes of the mean tensor
* @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p src_ptr
* @param[in] var_attrs The attributes of the var tensor
- * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p src_ptr
- * @param[in] beta_attrs The attributes of the beta tensor
- * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p src_ptr
- * @param[in] gamma_attrs The attributes of the gamma tensor
+ * @param[in] beta_ptr (Optional) Pointer to the beta source tensor. If not provided, beta defaults to 0. Supported data types: same as @p src_ptr
+ * @param[in] beta_attrs (Optional) The attributes of the beta tensor
+ * @param[in] gamma_ptr (Optional) Pointer to the gamma source tensor. If not provided, gamma defaults to 1. Supported data types: same as @p src_ptr
+ * @param[in] gamma_attrs (Optional) The attributes of the gamma tensor
*/
SHADER_PARAMS_DECLARATION
{
@@ -70,8 +72,12 @@ SHADER_PARAMS_DECLARATION
Tensor3DAttributes dst_attrs;
VectorAttributes mean_attrs;
VectorAttributes var_attrs;
- VectorAttributes beta_attrs;
- VectorAttributes gamma_attrs;
+#ifndef USE_DEFAULT_BETA
+ VectorAttributes beta_attrs;
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ VectorAttributes gamma_attrs;
+#endif /* USE_DEFAULT_GAMMA */
};
#ifdef DATA_TYPE_FP32
@@ -79,24 +85,34 @@ TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
TENSOR_DECLARATION(3, meanBuffer, float, mean_ptr, mean_shift, 2, readonly);
TENSOR_DECLARATION(4, varBuffer, float, var_ptr, var_shift, 2, readonly);
+#ifndef USE_DEFAULT_BETA
TENSOR_DECLARATION(5, betaBuffer, float, beta_ptr, beta_shift, 2, readonly);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+#ifdef USE_DEFAULT_BETA
+TENSOR_DECLARATION(5, gammaBuffer, float, gamma_ptr, gamma_shift, 2, readonly);
+#else /* USE_DEFAULT_BETA */
TENSOR_DECLARATION(6, gammaBuffer, float, gamma_ptr, gamma_shift, 2, readonly);
+#endif /* USE_DEFAULT_BETA */
+#endif /* USE_DEFAULT_GAMMA */
void main(void)
{
- Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
- Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
- VectorIterator mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
- VectorIterator var_iter = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
- VectorIterator beta_iter = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
- VectorIterator gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
+ Tensor3DIterator src_iter = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
+ Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
+ VectorIterator mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
+ VectorIterator var_iter = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
+#ifndef USE_DEFAULT_BETA
+ VectorIterator beta_iter = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ VectorIterator gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
+#endif /* USE_DEFAULT_GAMMA */
float input_value = 0.f;
float denominator = 0.f;
float numerator = 0.f;
float x_bar = 0.f;
- float gamma_param = 0.f;
- float beta_param = 0.f;
uint current_slice = gl_GlobalInvocationID.z;
@@ -109,10 +125,18 @@ void main(void)
numerator = SUB_OP(input_value, numerator);
x_bar = MUL_OP(numerator, denominator);
- gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * beta_attrs.stride_x));
- beta_param = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
+#ifndef USE_DEFAULT_GAMMA
+ float gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * gamma_attrs.stride_x));
+
+ x_bar = MUL_OP(gamma_param, x_bar);
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
+ float beta_param = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
+
+ x_bar = ADD_OP(x_bar, beta_param);
+#endif /* USE_DEFAULT_BETA */
- STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param)));
+ STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(x_bar));
}
#elif defined(DATA_TYPE_FP16)
@@ -120,8 +144,16 @@ TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
TENSOR_DECLARATION(3, meanBuffer, uvec2, mean_ptr, mean_shift, 3, readonly);
TENSOR_DECLARATION(4, varBuffer, uvec2, var_ptr, var_shift, 3, readonly);
+#ifndef USE_DEFAULT_BETA
TENSOR_DECLARATION(5, betaBuffer, uvec2, beta_ptr, beta_shift, 3, readonly);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+#ifdef USE_DEFAULT_BETA
+TENSOR_DECLARATION(5, gammaBuffer, uvec2, gamma_ptr, gamma_shift, 3, readonly);
+#else /* USE_DEFAULT_BETA */
TENSOR_DECLARATION(6, gammaBuffer, uvec2, gamma_ptr, gamma_shift, 3, readonly);
+#endif /* USE_DEFAULT_BETA */
+#endif /* USE_DEFAULT_GAMMA */
void main(void)
{
@@ -129,14 +161,18 @@ void main(void)
Tensor3DIterator dst_iter = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
VectorIterator mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
VectorIterator var_iter = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
+#ifndef USE_DEFAULT_BETA
VectorIterator beta_iter = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
VectorIterator gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
+#endif /* USE_DEFAULT_GAMMA */
vec4 unpacked_s[5];
float denominator;
float numerator;
- float gamma_param;
- float beta_param;
+ float gamma_param = 1.f;
+ float beta_param = 0.f;
vec4 x_bar;
vec4 result;
@@ -144,68 +180,87 @@ void main(void)
unpacked_s[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
unpacked_s[1] = LOAD_UNPACK4_HALF(var_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(var_iter, current_slice * var_attrs.stride_x));
unpacked_s[2] = LOAD_UNPACK4_HALF(mean_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(mean_iter, current_slice * mean_attrs.stride_x));
- unpacked_s[3] = LOAD_UNPACK4_HALF(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * beta_attrs.stride_x));
+#ifndef USE_DEFAULT_GAMMA
+ unpacked_s[3] = LOAD_UNPACK4_HALF(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * gamma_attrs.stride_x));
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
unpacked_s[4] = LOAD_UNPACK4_HALF(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
+#endif /* USE_DEFAULT_BETA */
if((current_slice % uint(4)) == uint(0))
{
denominator = unpacked_s[1].x;
denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
- //Calculate x bar and store results
- numerator = unpacked_s[2].x;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+ // Calculate x bar
+ numerator = unpacked_s[2].x;
+ x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+#ifndef USE_DEFAULT_GAMMA
gamma_param = unpacked_s[3].x;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
beta_param = unpacked_s[4].x;
- result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
}
else if((current_slice % uint(4)) == uint(1))
{
denominator = unpacked_s[1].y;
denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
- //Calculate x bar and store results
- numerator = unpacked_s[2].y;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+ // Calculate x bar
+ numerator = unpacked_s[2].y;
+ x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+#ifndef USE_DEFAULT_GAMMA
gamma_param = unpacked_s[3].y;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
beta_param = unpacked_s[4].y;
- result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
}
else if((current_slice % uint(4)) == uint(2))
{
denominator = unpacked_s[1].z;
denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
- //Calculate x bar and store results
- numerator = unpacked_s[2].z;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+ // Calculate x bar
+ numerator = unpacked_s[2].z;
+ x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+#ifndef USE_DEFAULT_GAMMA
gamma_param = unpacked_s[3].z;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
beta_param = unpacked_s[4].z;
- result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
}
else
{
denominator = unpacked_s[1].w;
denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
- //Calculate x bar and store results
- numerator = unpacked_s[2].w;
- x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+ // Calculate x bar
+ numerator = unpacked_s[2].w;
+ x_bar = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+#ifndef USE_DEFAULT_GAMMA
gamma_param = unpacked_s[3].w;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
beta_param = unpacked_s[4].w;
- result = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
- STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
}
+
+#ifndef USE_DEFAULT_GAMMA
+ x_bar = MUL_OP(gamma_param, x_bar);
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
+ x_bar = ADD_OP(x_bar, beta_param);
+#endif /* USE_DEFAULT_BETA */
+
+ result = ACTIVATION_FUNC(x_bar);
+
+ STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
}
#endif /*DATA_TYPE_FP16*/
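
As a scalar reference for the four shader variants above (a sketch only; the shader vectorizes this, applies ACTIVATION_FUNC, and packs/unpacks halves on the FP16 path):

    #include <cmath>

    // Scalar reference of the per-element computation. A null pointer stands
    // in for a parameter compiled out via USE_DEFAULT_BETA/USE_DEFAULT_GAMMA.
    float batch_norm_ref(float x, float mean, float var, float epsilon,
                         const float *gamma /* nullptr => 1 */,
                         const float *beta /* nullptr => 0 */)
    {
        float x_bar = (x - mean) / std::sqrt(var + epsilon);
        if(gamma != nullptr)
        {
            x_bar *= *gamma; // elided under USE_DEFAULT_GAMMA
        }
        if(beta != nullptr)
        {
            x_bar += *beta; // elided under USE_DEFAULT_BETA
        }
        return x_bar;
    }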
diff --git a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
index cd93f6997e..9a592dfe00 100644
--- a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
@@ -36,32 +36,118 @@
using namespace arm_compute;
-GCBatchNormalizationLayerKernel::GCBatchNormalizationLayerKernel()
- : _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _beta(nullptr), _gamma(nullptr), _epsilon(0.0f)
+namespace
{
-}
-
-void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta, const IGCTensor *gamma,
- float epsilon, ActivationLayerInfo act_info)
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon, ActivationLayerInfo act_info)
{
+ ARM_COMPUTE_UNUSED(epsilon);
+ ARM_COMPUTE_UNUSED(var);
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
- ARM_COMPUTE_ERROR_ON_NULLPTR(output);
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+ }
+
+ if(beta != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, beta);
+ }
+ if(gamma != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
+ }
if(act_info.enabled())
{
- ARM_COMPUTE_ERROR_ON(input->info()->data_type() != DataType::F32 && input->info()->data_type() != DataType::F16);
+ ARM_COMPUTE_ERROR_ON(input->data_type() != DataType::F32 && input->data_type() != DataType::F16);
ARM_COMPUTE_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU
&& act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
&& act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a());
}
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output,
+ ITensorInfo *mean, ITensorInfo *var,
+ ITensorInfo *beta, ITensorInfo *gamma)
+{
+ // Output tensor auto initialization if not yet initialized
+ auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type(), input->fixed_point_position());
+
+ unsigned int num_elems_processed_per_iteration = 1;
+ if(input->data_type() == DataType::F16)
+ {
+ num_elems_processed_per_iteration = 4;
+ }
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ AccessWindowStatic mean_access(mean, 0, 0, mean->dimension(0) + 3, mean->dimension(1));
+ AccessWindowStatic var_access(var, 0, 0, var->dimension(0) + 3, var->dimension(1));
+
+ bool window_changed = false;
+ if(beta != nullptr)
+ {
+ AccessWindowStatic beta_access(beta, 0, 0, beta->dimension(0) + 3, beta->dimension(1));
+ if(gamma != nullptr)
+ {
+ AccessWindowStatic gamma_access(gamma, 0, 0, gamma->dimension(0) + 3, gamma->dimension(1));
+ window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access, beta_access, gamma_access);
+ }
+ else
+ {
+ window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access, beta_access);
+ }
+ }
+ else
+ {
+ if(gamma != nullptr)
+ {
+ AccessWindowStatic gamma_access(gamma, 0, 0, gamma->dimension(0) + 3, gamma->dimension(1));
+ window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access, gamma_access);
+ }
+ else
+ {
+ window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access);
+ }
+ }
+ output_access.set_valid_region(win, input->valid_region());
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+} // namespace
+
+GCBatchNormalizationLayerKernel::GCBatchNormalizationLayerKernel()
+ : _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _beta(nullptr), _gamma(nullptr), _epsilon(0.0f)
+{
+}
+
+void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta, const IGCTensor *gamma,
+ float epsilon, ActivationLayerInfo act_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, mean, var);
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), mean->info(), var->info(),
+ (beta != nullptr) ? beta->info() : nullptr, (gamma != nullptr) ? gamma->info() : nullptr,
+ epsilon, act_info));
_input = input;
_output = output;
@@ -71,12 +157,6 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
_gamma = gamma;
_epsilon = epsilon;
- unsigned int num_elems_processed_per_iteration = 1;
- if(input->info()->data_type() == DataType::F16)
- {
- num_elems_processed_per_iteration = 4;
- }
-
// Set build options
std::set<std::string> build_opts;
std::string dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
@@ -85,6 +165,14 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
build_opts.emplace(("#define LOCAL_SIZE_X " + support::cpp11::to_string(1)));
build_opts.emplace(("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1)));
build_opts.emplace(("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1)));
+ if(beta == nullptr)
+ {
+ build_opts.emplace("#define USE_DEFAULT_BETA");
+ }
+ if(gamma == nullptr)
+ {
+ build_opts.emplace("#define USE_DEFAULT_GAMMA");
+ }
if(act_info.enabled())
{
@@ -97,19 +185,25 @@ void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTenso
_kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts));
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+ auto win_config = validate_and_configure_window(input->info(), output->info(), mean->info(), var->info(),
+ (beta != nullptr) ? beta->info() : nullptr, (gamma != nullptr) ? gamma->info() : nullptr);
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
- AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
- AccessWindowStatic mean_access(mean->info(), 0, 0, mean->info()->dimension(0) + 3, mean->info()->dimension(1));
- AccessWindowStatic var_access(var->info(), 0, 0, var->info()->dimension(0) + 3, var->info()->dimension(1));
- AccessWindowStatic beta_access(beta->info(), 0, 0, beta->info()->dimension(0) + 3, beta->info()->dimension(1));
- AccessWindowStatic gamma_access(gamma->info(), 0, 0, gamma->info()->dimension(0) + 3, gamma->info()->dimension(1));
+ IGCKernel::configure(win_config.second);
+}
- update_window_and_padding(win, input_access, output_access, mean_access, var_access, beta_access, gamma_access);
- output_access.set_valid_region(win, input->info()->valid_region());
+Status GCBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon, ActivationLayerInfo act_info)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon, act_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(),
+ mean->clone().get(), var->clone().get(),
+ beta->clone().get(), gamma->clone().get())
+ .first);
- IGCKernel::configure(win);
+ return Status{};
}
void GCBatchNormalizationLayerKernel::run(const Window &window)
@@ -127,11 +221,18 @@ void GCBatchNormalizationLayerKernel::run(const Window &window)
Window vector_slice = window.first_slice_window_1D();
vector_slice.set(Window::DimX, Window::Dimension(0, 0, 0));
- unsigned int idx = 2 * num_arguments_per_3D_tensor();
- add_1D_tensor_argument(idx, _mean, 3, vector_slice);
- add_1D_tensor_argument(idx, _var, 4, vector_slice);
- add_1D_tensor_argument(idx, _beta, 5, vector_slice);
- add_1D_tensor_argument(idx, _gamma, 6, vector_slice);
+ unsigned int idx = 2 * num_arguments_per_3D_tensor();
+ unsigned int binding_point = 3;
+ add_1D_tensor_argument(idx, _mean, binding_point, vector_slice);
+ add_1D_tensor_argument(idx, _var, ++binding_point, vector_slice);
+ if(_beta != nullptr)
+ {
+ add_1D_tensor_argument(idx, _beta, ++binding_point, vector_slice);
+ }
+ if(_gamma != nullptr)
+ {
+ add_1D_tensor_argument(idx, _gamma, ++binding_point, vector_slice);
+ }
slice.shift(Window::DimX, -(_output->info()->padding()).left);
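
The binding_point bookkeeping in run() above must agree with the shader's conditional TENSOR_DECLARATIONs: mean and var always take points 3 and 4, and the optional vectors take the next consecutive points, which is why the shader re-declares gammaBuffer at binding 5 when USE_DEFAULT_BETA is set. A compilable sketch of that logic (bind() is a hypothetical stand-in for add_1D_tensor_argument):

    #include <cstdio>

    static void bind(const char *name, unsigned int point)
    {
        std::printf("%s -> binding %u\n", name, point);
    }

    static void bind_bn_vectors(bool has_beta, bool has_gamma)
    {
        unsigned int binding_point = 3;
        bind("mean", binding_point);
        bind("var", ++binding_point); // 4
        if(has_beta)
        {
            bind("beta", ++binding_point); // 5
        }
        if(has_gamma)
        {
            bind("gamma", ++binding_point); // 6, or 5 when beta is absent
        }
    }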
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index 1f730a2c3c..d1bdfac2da 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -62,9 +62,21 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const IT
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
+ if(beta != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
+ }
+ if(gamma != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
+ }
ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
return Status{};
@@ -72,6 +84,12 @@ validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const IT
std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
+ if(output != nullptr)
+ {
+ // Output tensor auto initialization if not yet initialized
+ auto_init_if_empty(*output, *input->clone());
+ }
+
unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -99,13 +117,13 @@ void NEBatchNormalizationLayerKernel::batch_normalization_qs8(const Window &wind
const int fixed_point_position = _input->info()->fixed_point_position();
const auto input_mean = reinterpret_cast<const qint8_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
const auto input_var = reinterpret_cast<const qint8_t *>(_var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const qint8_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const qint8_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const qint8_t *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta = (_beta != nullptr) ? reinterpret_cast<const qint8_t *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
qint8x16_t mean_vec = vdupq_n_qs8(0);
qint8x16_t var_vec = vdupq_n_qs8(0);
- qint8x16_t gamma_vec = vdupq_n_qs8(0);
- qint8x16_t beta_vec = vdupq_n_qs8(0);
+ qint8x16_t gamma_vec = vdupq_n_qs8(sqcvt_qs8_f32(1, fixed_point_position));
+ qint8x16_t beta_vec = vdupq_n_qs8(sqcvt_qs8_f32(0, fixed_point_position));
qint8x16_t denominator = vdupq_n_qs8(0);
const qint8x16_t epsilon_vec = vdupq_n_qs8(sqcvt_qs8_f32(_epsilon, fixed_point_position));
execute_window_loop(window, [&](const Coordinates & id)
@@ -113,10 +131,16 @@ void NEBatchNormalizationLayerKernel::batch_normalization_qs8(const Window &wind
if(slice != id.z())
{
// Construct vectors
- mean_vec = vdupq_n_qs8(*(input_mean + id.z()));
- var_vec = vdupq_n_qs8(*(input_var + id.z()));
- gamma_vec = vdupq_n_qs8(*(input_gamma + id.z()));
- beta_vec = vdupq_n_qs8(*(input_beta + id.z()));
+ mean_vec = vdupq_n_qs8(*(input_mean + id.z()));
+ var_vec = vdupq_n_qs8(*(input_var + id.z()));
+ if(input_gamma != nullptr)
+ {
+ gamma_vec = vdupq_n_qs8(*(input_gamma + id.z()));
+ }
+ if(input_beta != nullptr)
+ {
+ beta_vec = vdupq_n_qs8(*(input_beta + id.z()));
+ }
// Calculate denominator
denominator = vqinvsqrtq_qs8(vqaddq_qs8(var_vec, epsilon_vec), fixed_point_position);
@@ -146,13 +170,13 @@ void NEBatchNormalizationLayerKernel::batch_normalization_qs16(const Window &win
const int fixed_point_position = _input->info()->fixed_point_position();
const auto input_mean = reinterpret_cast<const qint16_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
const auto input_var = reinterpret_cast<const qint16_t *>(_var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const qint16_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const qint16_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const qint16_t *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta = (_beta != nullptr) ? reinterpret_cast<const qint16_t *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
qint16x8_t mean_vec = vdupq_n_qs16(0);
qint16x8_t var_vec = vdupq_n_qs16(0);
- qint16x8_t gamma_vec = vdupq_n_qs16(0);
- qint16x8_t beta_vec = vdupq_n_qs16(0);
+ qint16x8_t gamma_vec = vdupq_n_qs16(sqcvt_qs16_f32(1, fixed_point_position));
+ qint16x8_t beta_vec = vdupq_n_qs16(sqcvt_qs16_f32(0, fixed_point_position));
qint16x8_t denominator = vdupq_n_qs16(0);
const qint16x8_t epsilon_vec = vdupq_n_qs16(sqcvt_qs16_f32(_epsilon, fixed_point_position));
execute_window_loop(window, [&](const Coordinates & id)
@@ -160,10 +184,16 @@ void NEBatchNormalizationLayerKernel::batch_normalization_qs16(const Window &win
if(slice != id.z())
{
// Construct vectors
- mean_vec = vdupq_n_qs16(*(input_mean + id.z()));
- var_vec = vdupq_n_qs16(*(input_var + id.z()));
- gamma_vec = vdupq_n_qs16(*(input_gamma + id.z()));
- beta_vec = vdupq_n_qs16(*(input_beta + id.z()));
+ mean_vec = vdupq_n_qs16(*(input_mean + id.z()));
+ var_vec = vdupq_n_qs16(*(input_var + id.z()));
+ if(input_gamma != nullptr)
+ {
+ gamma_vec = vdupq_n_qs16(*(input_gamma + id.z()));
+ }
+ if(input_beta != nullptr)
+ {
+ beta_vec = vdupq_n_qs16(*(input_beta + id.z()));
+ }
// Calculate denominator
denominator = vqinvsqrtq_qs16(vqaddq_qs16(var_vec, epsilon_vec), fixed_point_position);
@@ -194,12 +224,12 @@ void NEBatchNormalizationLayerKernel::batch_normalization_fp16(const Window &win
const auto input_mean = reinterpret_cast<const float16_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
const auto input_var = reinterpret_cast<const float16_t *>(_var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const float16_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const float16_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const float16_t *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta = (_beta != nullptr) ? reinterpret_cast<const float16_t *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
float16x8_t mean_vec = vdupq_n_f16(0.0);
float16x8_t var_vec = vdupq_n_f16(0.0);
- float16x8_t gamma_vec = vdupq_n_f16(0.0);
+ float16x8_t gamma_vec = vdupq_n_f16(1.0);
float16x8_t beta_vec = vdupq_n_f16(0.0);
float16x8_t denominator = vdupq_n_f16(0.0);
const float16x8_t epsilon_vec = vdupq_n_f16(_epsilon);
@@ -208,10 +238,16 @@ void NEBatchNormalizationLayerKernel::batch_normalization_fp16(const Window &win
if(slice != id.z())
{
// Construct vectors
- mean_vec = vdupq_n_f16(*(input_mean + id.z()));
- var_vec = vdupq_n_f16(*(input_var + id.z()));
- gamma_vec = vdupq_n_f16(*(input_gamma + id.z()));
- beta_vec = vdupq_n_f16(*(input_beta + id.z()));
+ mean_vec = vdupq_n_f16(*(input_mean + id.z()));
+ var_vec = vdupq_n_f16(*(input_var + id.z()));
+ if(input_gamma != nullptr)
+ {
+ gamma_vec = vdupq_n_f16(*(input_gamma + id.z()));
+ }
+ if(input_beta != nullptr)
+ {
+ beta_vec = vdupq_n_f16(*(input_beta + id.z()));
+ }
// Calculate denominator
denominator = vinvsqrtq_f16(vaddq_f16(var_vec, epsilon_vec));
@@ -241,12 +277,12 @@ void NEBatchNormalizationLayerKernel::batch_normalization_fp32(const Window &win
const auto input_mean = reinterpret_cast<const float *>(_mean->ptr_to_element(Coordinates(0, 0)));
const auto input_var = reinterpret_cast<const float *>(_var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = reinterpret_cast<const float *>(_gamma->ptr_to_element(Coordinates(0, 0)));
- const auto input_beta = reinterpret_cast<const float *>(_beta->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const float *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta = (_beta != nullptr) ? reinterpret_cast<const float *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
float32x4_t mean_vec = vdupq_n_f32(0.0);
float32x4_t var_vec = vdupq_n_f32(0.0);
- float32x4_t gamma_vec = vdupq_n_f32(0.0);
+ float32x4_t gamma_vec = vdupq_n_f32(1.0);
float32x4_t beta_vec = vdupq_n_f32(0.0);
float32x4_t denominator = vdupq_n_f32(0.0);
const float32x4_t epsilon_vec = vdupq_n_f32(_epsilon);
@@ -255,10 +291,16 @@ void NEBatchNormalizationLayerKernel::batch_normalization_fp32(const Window &win
if(slice != id.z())
{
// Construct vectors
- mean_vec = vdupq_n_f32(*(input_mean + id.z()));
- var_vec = vdupq_n_f32(*(input_var + id.z()));
- gamma_vec = vdupq_n_f32(*(input_gamma + id.z()));
- beta_vec = vdupq_n_f32(*(input_beta + id.z()));
+ mean_vec = vdupq_n_f32(*(input_mean + id.z()));
+ var_vec = vdupq_n_f32(*(input_var + id.z()));
+ if(input_gamma != nullptr)
+ {
+ gamma_vec = vdupq_n_f32(*(input_gamma + id.z()));
+ }
+ if(input_beta != nullptr)
+ {
+ beta_vec = vdupq_n_f32(*(input_beta + id.z()));
+ }
// Calculate denominator
denominator = vinvsqrtq_f32(vaddq_f32(var_vec, epsilon_vec));
@@ -335,21 +377,12 @@ void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output,
const ITensor *beta, const ITensor *gamma,
float epsilon, ActivationLayerInfo act_info)
{
- ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
- ITensorInfo *output_info = nullptr;
-
- if(nullptr != output)
- {
- // Output tensor auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), *input->info());
-
- output_info = output->info();
- }
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output_info,
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr,
mean->info(), var->info(),
- beta->info(), gamma->info(),
+ (beta != nullptr) ? beta->info() : nullptr,
+ (gamma != nullptr) ? gamma->info() : nullptr,
epsilon, act_info));
_input = input;
@@ -361,7 +394,8 @@ void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output,
_epsilon = epsilon;
_act_info = act_info;
- if(output != nullptr)
+ const bool run_in_place = (output == nullptr) || (output == input);
+ if(!run_in_place)
{
_output = output;
}
@@ -377,7 +411,7 @@ void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output,
}
// Configure kernel window
- auto win_config = validate_and_configure_window(input->info(), output_info);
+ auto win_config = validate_and_configure_window(input->info(), (run_in_place) ? nullptr : output->info());
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
INEKernel::configure(win_config.second);
}
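
Across the four NEON data-type paths, the broadcast vectors are now seeded with the neutral elements (gamma = 1, beta = 0, converted via sqcvt on the fixed-point paths) so that skipping the per-slice reload leaves the identity scale and shift in place. A sketch of the FP32 case (simplified names; the kernel fuses this into its window loop):

    #include <arm_neon.h>

    // Apply the optional scale and shift to a normalized vector x_bar for
    // slice z. A null pointer means the parameter was not provided.
    float32x4_t bn_scale_shift(float32x4_t x_bar, const float *gamma,
                               const float *beta, int z)
    {
        float32x4_t gamma_vec = vdupq_n_f32(1.0f); // default gamma
        float32x4_t beta_vec  = vdupq_n_f32(0.0f); // default beta
        if(gamma != nullptr)
        {
            gamma_vec = vdupq_n_f32(gamma[z]);
        }
        if(beta != nullptr)
        {
            beta_vec = vdupq_n_f32(beta[z]);
        }
        return vmlaq_f32(beta_vec, gamma_vec, x_bar); // beta + gamma * x_bar
    }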