From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang-format configuration
file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the
files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
---
 .../kernels/batchnormalization/impl/NEON/fp16.cpp  | 166 ++++++++++++---------
 1 file changed, 97 insertions(+), 69 deletions(-)

(limited to 'src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp')

diff --git a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
index ed5254a0a4..e3d9b670b3 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
@@ -24,8 +24,9 @@
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensorPack.h"
 #include "arm_compute/core/Window.h"
-#include "src/core/NEON/NEMath.h"
+
 #include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
+#include "src/core/NEON/NEMath.h"
 #include "src/core/NEON/wrapper/wrapper.h"
 
 #include <arm_neon.h>
@@ -37,12 +38,26 @@
 namespace arm_compute
 {
 namespace
 {
-using BatchNomalizationPtr = void (*)(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
-                                      float epsilon, ActivationLayerInfo &act_info, const Window &window);
+using BatchNomalizationPtr = void (*)(ITensor *src,
+                                      ITensor *dst,
+                                      const ITensor *mean,
+                                      const ITensor *var,
+                                      const ITensor *beta,
+                                      const ITensor *gamma,
+                                      float epsilon,
+                                      ActivationLayerInfo &act_info,
+                                      const Window &window);
 
 template <typename T>
-void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
-                         float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void batch_normalization(ITensor *src,
+                         ITensor *dst,
+                         const ITensor *mean,
+                         const ITensor *var,
+                         const ITensor *beta,
+                         const ITensor *gamma,
+                         float epsilon,
+                         ActivationLayerInfo &act_info,
+                         const Window &window)
 {
     /** SIMD vector tag type. */
     using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
@@ -57,86 +72,99 @@ void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const
     Iterator input(src, win_collapsed);
     Iterator output(dst, win_collapsed);
 
-    const auto input_mean  = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
-    const auto input_var   = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
-    const auto input_gamma = (gamma != nullptr) ? reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
-    const auto input_beta  = (beta != nullptr) ? reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
+    const auto input_mean = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
+    const auto input_var  = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
+    const auto input_gamma =
+        (gamma != nullptr) ? reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+    const auto input_beta =
+        (beta != nullptr) ? reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
 
     T activation_functor(act_info);
 
     const auto epsilon_vec = wrapper::vdup_n(static_cast<float16_t>(epsilon), ExactTagType{});
-    execute_window_loop(win_collapsed, [&](const Coordinates &)
-    {
-        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-
-        // Perform core calculations using vector operations
-        int x = window_start_x;
-        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+    execute_window_loop(
+        win_collapsed,
+        [&](const Coordinates &)
         {
-            // Conctruct vectors
-            const auto mean_vec  = wrapper::vloadq(input_mean + x);
-            const auto var_vec   = wrapper::vloadq(input_var + x);
-            const auto gamma_vec = (input_gamma != nullptr) ? wrapper::vloadq(input_gamma + x) : wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
-            const auto beta_vec  = (input_beta != nullptr) ? wrapper::vloadq(input_beta + x) : wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
-
-            // Calculate denominator
-            const auto denominator = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
-
-            // Calculate x bar
-            const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
-            const auto x_bar     = wrapper::vmul(numerator, denominator);
-            auto       res       = wrapper::vmla(beta_vec, x_bar, gamma_vec);
-
-            // Perform fused activation
-            if(act_info.enabled())
+            const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+            const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+
+            // Perform core calculations using vector operations
+            int x = window_start_x;
+            for (; x <= (window_end_x - window_step_x); x += window_step_x)
             {
-                activation_functor(res);
+                // Construct vectors
+                const auto mean_vec  = wrapper::vloadq(input_mean + x);
+                const auto var_vec   = wrapper::vloadq(input_var + x);
+                const auto gamma_vec = (input_gamma != nullptr)
+                                           ? wrapper::vloadq(input_gamma + x)
+                                           : wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
+                const auto beta_vec  = (input_beta != nullptr)
+                                          ? wrapper::vloadq(input_beta + x)
+                                          : wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
+
+                // Calculate denominator
+                const auto denominator = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
+
+                // Calculate x bar
+                const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
+                const auto x_bar     = wrapper::vmul(numerator, denominator);
+                auto       res       = wrapper::vmla(beta_vec, x_bar, gamma_vec);
+
+                // Perform fused activation
+                if (act_info.enabled())
+                {
+                    activation_functor(res);
+                }
+
+                // Store results
+                wrapper::vstore(output_ptr + x, res);
             }
 
-            // Store results
-            wrapper::vstore(output_ptr + x, res);
-        }
-
-        // Compute left-over elements
-        for(; x < window_end_x; ++x)
-        {
-            // Conctruct vectors
-            const float16_t gamma = (input_gamma != nullptr) ? input_gamma[x] : 1.f;
-            const float16_t beta  = (input_beta != nullptr) ? input_beta[x] : 0.f;
-
-            const float16_t denominator = sqrt(input_var[x] + epsilon);
-            const float16_t numerator   = input_ptr[x] - input_mean[x];
-            const float16_t x_bar       = numerator / denominator;
-            float16_t       res         = beta + x_bar * gamma;
-
-            // Perform fused activation
-            if(act_info.enabled())
+            // Compute left-over elements
+            for (; x < window_end_x; ++x)
             {
-                activation_functor(res);
+                // Construct vectors
+                const float16_t gamma = (input_gamma != nullptr) ? input_gamma[x] : 1.f;
+                const float16_t beta  = (input_beta != nullptr) ? input_beta[x] : 0.f;
+
+                const float16_t denominator = sqrt(input_var[x] + epsilon);
+                const float16_t numerator   = input_ptr[x] - input_mean[x];
+                const float16_t x_bar       = numerator / denominator;
+                float16_t       res         = beta + x_bar * gamma;
+
+                // Perform fused activation
+                if (act_info.enabled())
+                {
+                    activation_functor(res);
+                }
+
+                // Store results
+                *reinterpret_cast<float16_t *>(output_ptr + x) = res;
             }
-
-            // Store results
-            *reinterpret_cast<float16_t *>(output_ptr + x) = res;
-        }
-    },
-    input, output);
+        },
+        input, output);
 }
 
 // Fused Batched Normalization with activation functions
-static std::map<ActivationLayerInfo::ActivationFunction, BatchNomalizationPtr> fused_map =
-{
-    { ActivationLayerInfo::ActivationFunction::RELU, &batch_normalization<detail::relu<float16_t, 8>> },
-    { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &batch_normalization<detail::brelu<float16_t, 8>> },
-    { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &batch_normalization<detail::lubrelu<float16_t, 8>> }
-};
-}
+static std::map<ActivationLayerInfo::ActivationFunction, BatchNomalizationPtr> fused_map = {
+    {ActivationLayerInfo::ActivationFunction::RELU, &batch_normalization<detail::relu<float16_t, 8>>},
+    {ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &batch_normalization<detail::brelu<float16_t, 8>>},
+    {ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &batch_normalization<detail::lubrelu<float16_t, 8>>}};
+} // namespace
 
 namespace cpu
 {
-void fp16_neon_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
-                                   float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void fp16_neon_batch_normalization(ITensor *src,
+                                   ITensor *dst,
+                                   const ITensor *mean,
+                                   const ITensor *var,
+                                   const ITensor *beta,
+                                   const ITensor *gamma,
+                                   float epsilon,
+                                   ActivationLayerInfo &act_info,
+                                   const Window &window)
 {
-    if(act_info.enabled())
+    if (act_info.enabled())
     {
         fused_map[act_info.activation()](src, dst, mean, var, beta, gamma, epsilon, act_info, window);
     }
--
cgit v1.2.1
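
Note on the formatting configuration: the revised .clang-format file is explicitly not part of this delivery, so the exact settings are unknown. The sketch below is only an inference from the changes visible in this diff (roughly 120-column wrapping, Allman braces, one parameter per line aligned under the opening parenthesis, a space after control-flow keywords, leading ternary operators, case-insensitive include sorting, and closing-namespace comments); it is illustrative, not the actual Arm configuration.

    # Hypothetical .clang-format (clang-format 14), inferred from the diff above
    Language: Cpp
    ColumnLimit: 120
    IndentWidth: 4
    UseTab: Never
    BreakBeforeBraces: Allman
    BinPackArguments: false
    BinPackParameters: false
    AlignAfterOpenBracket: Align
    AlignConsecutiveAssignments: true
    PointerAlignment: Right
    SpaceBeforeParens: ControlStatements
    BreakBeforeTernaryOperators: true
    SortIncludes: CaseInsensitive
    FixNamespaceComments: true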
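
For readers of the kernel itself: both the vector loop and the scalar left-over loop compute the standard batch-normalization transform, res = gamma * (x - mean) / sqrt(var + epsilon) + beta, with gamma defaulting to 1 and beta to 0 when those tensors are absent. A minimal scalar restatement follows; it is a hypothetical helper for illustration only (not library API), with the per-channel indexing of mean/var/beta/gamma elided.

    #include <cmath>
    #include <cstddef>

    // Hypothetical scalar reference of the transform the NEON kernel vectorizes.
    // x/y hold one channel's elements; mean/var/beta/gamma are that channel's
    // statistics and parameters (the kernel loads these per lane with vloadq).
    void batch_norm_scalar(const float *x, float *y, std::size_t n,
                           float mean, float var, float beta, float gamma, float epsilon)
    {
        for (std::size_t i = 0; i < n; ++i)
        {
            const float x_bar = (x[i] - mean) / std::sqrt(var + epsilon); // denominator = sqrt(var + eps)
            y[i] = beta + x_bar * gamma; // matches wrapper::vmla(beta_vec, x_bar, gamma_vec)
        }
    }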