Diffstat (limited to 'src/core/NEON/kernels/batchnormalization')
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp | 166
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp | 166
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/SVE/fp16.cpp  | 120
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/SVE/fp32.cpp  | 120
-rw-r--r--  src/core/NEON/kernels/batchnormalization/impl/list.h        |  31
5 files changed, 351 insertions(+), 252 deletions(-)
diff --git a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
index ed5254a0a4..e3d9b670b3 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp16.cpp
@@ -24,8 +24,9 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
-#include "src/core/NEON/NEMath.h"
+
#include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
+#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include <arm_neon.h>
@@ -37,12 +38,26 @@ namespace arm_compute
{
namespace
{
-using BatchNomalizationPtr = void (*)(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window);
+using BatchNomalizationPtr = void (*)(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window);
template <typename T>
-void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void batch_normalization(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window)
{
/** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float16_t, wrapper::traits::BitWidth::W128>;
@@ -57,86 +72,99 @@ void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const
Iterator input(src, win_collapsed);
Iterator output(dst, win_collapsed);
- const auto input_mean = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = (gamma != nullptr) ? reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
- const auto input_beta = (beta != nullptr) ? reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_mean = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma =
+ (gamma != nullptr) ? reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta =
+ (beta != nullptr) ? reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
T activation_functor(act_info);
const auto epsilon_vec = wrapper::vdup_n(static_cast<float16_t>(epsilon), ExactTagType{});
- execute_window_loop(win_collapsed, [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-
- // Perform core calculations using vector operations
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
{
- // Conctruct vectors
- const auto mean_vec = wrapper::vloadq(input_mean + x);
- const auto var_vec = wrapper::vloadq(input_var + x);
- const auto gamma_vec = (input_gamma != nullptr) ? wrapper::vloadq(input_gamma + x) : wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
- const auto beta_vec = (input_beta != nullptr) ? wrapper::vloadq(input_beta + x) : wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
-
- // Calculate denominator
- const auto denominator = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
-
- // Calculate x bar
- const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
- const auto x_bar = wrapper::vmul(numerator, denominator);
- auto res = wrapper::vmla(beta_vec, x_bar, gamma_vec);
-
- // Perform fused activation
- if(act_info.enabled())
+ const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
+ const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+
+ // Perform core calculations using vector operations
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
{
- activation_functor(res);
+ // Construct vectors
+ const auto mean_vec = wrapper::vloadq(input_mean + x);
+ const auto var_vec = wrapper::vloadq(input_var + x);
+ const auto gamma_vec = (input_gamma != nullptr)
+ ? wrapper::vloadq(input_gamma + x)
+ : wrapper::vdup_n(static_cast<float16_t>(1.f), ExactTagType{});
+ const auto beta_vec = (input_beta != nullptr)
+ ? wrapper::vloadq(input_beta + x)
+ : wrapper::vdup_n(static_cast<float16_t>(0.f), ExactTagType{});
+
+ // Calculate denominator
+ const auto denominator = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
+
+ // Calculate x bar
+ const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
+ const auto x_bar = wrapper::vmul(numerator, denominator);
+ auto res = wrapper::vmla(beta_vec, x_bar, gamma_vec);
+
+ // Perform fused activation
+ if (act_info.enabled())
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ wrapper::vstore(output_ptr + x, res);
}
- // Store results
- wrapper::vstore(output_ptr + x, res);
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- // Conctruct vectors
- const float16_t gamma = (input_gamma != nullptr) ? input_gamma[x] : 1.f;
- const float16_t beta = (input_beta != nullptr) ? input_beta[x] : 0.f;
-
- const float16_t denominator = sqrt(input_var[x] + epsilon);
- const float16_t numerator = input_ptr[x] - input_mean[x];
- const float16_t x_bar = numerator / denominator;
- float16_t res = beta + x_bar * gamma;
-
- // Perform fused activation
- if(act_info.enabled())
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
{
- activation_functor(res);
+ // Construct vectors
+ const float16_t gamma = (input_gamma != nullptr) ? input_gamma[x] : 1.f;
+ const float16_t beta = (input_beta != nullptr) ? input_beta[x] : 0.f;
+
+ const float16_t denominator = sqrt(input_var[x] + epsilon);
+ const float16_t numerator = input_ptr[x] - input_mean[x];
+ const float16_t x_bar = numerator / denominator;
+ float16_t res = beta + x_bar * gamma;
+
+ // Perform fused activation
+ if (act_info.enabled())
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ *reinterpret_cast<float16_t *>(output_ptr + x) = res;
}
-
- // Store results
- *reinterpret_cast<float16_t *>(output_ptr + x) = res;
- }
- },
- input, output);
+ },
+ input, output);
}
// Fused Batch Normalization with activation functions
-static std::map<ActivationLayerInfo::ActivationFunction, BatchNomalizationPtr> fused_map =
-{
- { ActivationLayerInfo::ActivationFunction::RELU, &batch_normalization<detail::relu<float16_t, 8>> },
- { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &batch_normalization<detail::brelu<float16_t, 8>> },
- { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &batch_normalization<detail::lubrelu<float16_t, 8>> }
-};
-}
+static std::map<ActivationLayerInfo::ActivationFunction, BatchNomalizationPtr> fused_map = {
+ {ActivationLayerInfo::ActivationFunction::RELU, &batch_normalization<detail::relu<float16_t, 8>>},
+ {ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &batch_normalization<detail::brelu<float16_t, 8>>},
+ {ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &batch_normalization<detail::lubrelu<float16_t, 8>>}};
+} // namespace
namespace cpu
{
-void fp16_neon_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void fp16_neon_batch_normalization(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window)
{
- if(act_info.enabled())
+ if (act_info.enabled())
{
fused_map[act_info.activation()](src, dst, mean, var, beta, gamma, epsilon, act_info, window);
}
diff --git a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp
index d6e22e1843..4e1654ee6b 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/NEON/fp32.cpp
@@ -24,8 +24,9 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
-#include "src/core/NEON/NEMath.h"
+
#include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
+#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include <arm_neon.h>
@@ -36,12 +37,26 @@ namespace arm_compute
{
namespace
{
-using BatchNomalizationPtr = void (*)(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window);
+using BatchNomalizationPtr = void (*)(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window);
template <typename T>
-void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void batch_normalization(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window)
{
/** SIMD vector tag type. */
using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<float, wrapper::traits::BitWidth::W128>;
@@ -56,86 +71,99 @@ void batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const
Iterator input(src, win_collapsed);
Iterator output(dst, win_collapsed);
- const auto input_mean = reinterpret_cast<const float *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const float *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = (gamma != nullptr) ? reinterpret_cast<const float *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
- const auto input_beta = (beta != nullptr) ? reinterpret_cast<const float *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_mean = reinterpret_cast<const float *>(mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const float *>(var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma =
+ (gamma != nullptr) ? reinterpret_cast<const float *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta =
+ (beta != nullptr) ? reinterpret_cast<const float *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
T activation_functor(act_info);
const auto epsilon_vec = wrapper::vdup_n(static_cast<float>(epsilon), ExactTagType{});
- execute_window_loop(win_collapsed, [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
- // Perform core calculations using vector operations
- int x = window_start_x;
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
{
- // Conctruct vectors
- const auto mean_vec = wrapper::vloadq(input_mean + x);
- const auto var_vec = wrapper::vloadq(input_var + x);
- const auto gamma_vec = (input_gamma != nullptr) ? wrapper::vloadq(input_gamma + x) : wrapper::vdup_n(static_cast<float>(1.f), ExactTagType{});
- const auto beta_vec = (input_beta != nullptr) ? wrapper::vloadq(input_beta + x) : wrapper::vdup_n(static_cast<float>(0.f), ExactTagType{});
-
- // Calculate denominator
- const auto denominator = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
-
- // Calculate x bar
- const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
- const auto x_bar = wrapper::vmul(numerator, denominator);
- auto res = wrapper::vmla(beta_vec, x_bar, gamma_vec);
-
- // Perform fused activation
- if(act_info.enabled())
+ const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
+ const auto output_ptr = reinterpret_cast<float *>(output.ptr());
+
+ // Perform core calculations using vector operations
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
{
- activation_functor(res);
+ // Construct vectors
+ const auto mean_vec = wrapper::vloadq(input_mean + x);
+ const auto var_vec = wrapper::vloadq(input_var + x);
+ const auto gamma_vec = (input_gamma != nullptr)
+ ? wrapper::vloadq(input_gamma + x)
+ : wrapper::vdup_n(static_cast<float>(1.f), ExactTagType{});
+ const auto beta_vec = (input_beta != nullptr)
+ ? wrapper::vloadq(input_beta + x)
+ : wrapper::vdup_n(static_cast<float>(0.f), ExactTagType{});
+
+ // Calculate denominator
+ const auto denominator = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
+
+ // Calculate x bar
+ const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
+ const auto x_bar = wrapper::vmul(numerator, denominator);
+ auto res = wrapper::vmla(beta_vec, x_bar, gamma_vec);
+
+ // Perform fused activation
+ if (act_info.enabled())
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ wrapper::vstore(output_ptr + x, res);
}
- // Store results
- wrapper::vstore(output_ptr + x, res);
- }
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- // Conctruct vectors
- const float gamma = (input_gamma != nullptr) ? input_gamma[x] : 1.f;
- const float beta = (input_beta != nullptr) ? input_beta[x] : 0.f;
-
- const float denominator = sqrt(input_var[x] + epsilon);
- const float numerator = input_ptr[x] - input_mean[x];
- const float x_bar = numerator / denominator;
- float res = beta + x_bar * gamma;
-
- // Perform fused activation
- if(act_info.enabled())
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
{
- activation_functor(res);
+ // Construct vectors
+ const float gamma = (input_gamma != nullptr) ? input_gamma[x] : 1.f;
+ const float beta = (input_beta != nullptr) ? input_beta[x] : 0.f;
+
+ const float denominator = sqrt(input_var[x] + epsilon);
+ const float numerator = input_ptr[x] - input_mean[x];
+ const float x_bar = numerator / denominator;
+ float res = beta + x_bar * gamma;
+
+ // Perform fused activation
+ if (act_info.enabled())
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ *reinterpret_cast<float *>(output_ptr + x) = res;
}
-
- // Store results
- *reinterpret_cast<float *>(output_ptr + x) = res;
- }
- },
- input, output);
+ },
+ input, output);
}
// Fused Batch Normalization with activation functions
-static std::map<ActivationLayerInfo::ActivationFunction, BatchNomalizationPtr> fused_map =
-{
- { ActivationLayerInfo::ActivationFunction::RELU, &batch_normalization<detail::relu<float, 4>> },
- { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &batch_normalization<detail::brelu<float, 4>> },
- { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &batch_normalization<detail::lubrelu<float, 4>> }
-};
-}
+static std::map<ActivationLayerInfo::ActivationFunction, BatchNomalizationPtr> fused_map = {
+ {ActivationLayerInfo::ActivationFunction::RELU, &batch_normalization<detail::relu<float, 4>>},
+ {ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, &batch_normalization<detail::brelu<float, 4>>},
+ {ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, &batch_normalization<detail::lubrelu<float, 4>>}};
+} // namespace
namespace cpu
{
-void fp32_neon_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void fp32_neon_batch_normalization(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window)
{
- if(act_info.enabled())
+ if (act_info.enabled())
{
fused_map[act_info.activation()](src, dst, mean, var, beta, gamma, epsilon, act_info, window);
}
diff --git a/src/core/NEON/kernels/batchnormalization/impl/SVE/fp16.cpp b/src/core/NEON/kernels/batchnormalization/impl/SVE/fp16.cpp
index a715b9d3ee..48caaa3e63 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/SVE/fp16.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/SVE/fp16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2021,2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,20 +24,29 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+
#include "src/core/NEON/SVEMath.h"
#include <cmath>
#include <cstddef>
-#if defined(ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
#include <arm_sve.h>
namespace arm_compute
{
namespace cpu
{
-void fp16_sve_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void fp16_sve_batch_normalization(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window)
{
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
@@ -48,69 +57,74 @@ void fp16_sve_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mea
Iterator input(src, win_collapsed);
Iterator output(dst, win_collapsed);
- const auto input_mean = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = (gamma != nullptr) ? reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
- const auto input_beta = (beta != nullptr) ? reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_mean = reinterpret_cast<const float16_t *>(mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const float16_t *>(var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma =
+ (gamma != nullptr) ? reinterpret_cast<const float16_t *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta =
+ (beta != nullptr) ? reinterpret_cast<const float16_t *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
const auto epsilon_vec = svdup_n_f16(epsilon);
const auto const_1 = svdup_n_f16(1.f);
const auto const_0 = svdup_n_f16(0.f);
const auto va = svdup_n_f16(act_info.a());
const auto vb = svdup_n_f16(act_info.b());
- execute_window_loop(win_collapsed, [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- svbool_t pg = svwhilelt_b16(x, window_end_x);
- do
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
{
- // Conctruct vectors
- const auto mean_vec = svld1_f16(pg, input_mean + x);
- const auto var_vec = svld1_f16(pg, input_var + x);
- const auto gamma_vec = (input_gamma != nullptr) ? svld1_f16(pg, input_gamma + x) : const_1;
- const auto beta_vec = (input_beta != nullptr) ? svld1_f16(pg, input_beta + x) : const_0;
+ const auto input_ptr = reinterpret_cast<const float16_t *>(input.ptr());
+ const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
- // Calculate denominator
- const auto tmp = svadd_f16_z(pg, var_vec, epsilon_vec);
- auto denominator = svrsqrte_f16(tmp);
- denominator = svmul_f16_z(pg, svrsqrts_f16(svmul_f16_z(pg, tmp, denominator), denominator), denominator);
- denominator = svmul_f16_z(pg, svrsqrts_f16(svmul_f16_z(pg, tmp, denominator), denominator), denominator);
+ // Compute S elements per iteration
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b16(x, window_end_x);
+ do
+ {
+ // Construct vectors
+ const auto mean_vec = svld1_f16(pg, input_mean + x);
+ const auto var_vec = svld1_f16(pg, input_var + x);
+ const auto gamma_vec = (input_gamma != nullptr) ? svld1_f16(pg, input_gamma + x) : const_1;
+ const auto beta_vec = (input_beta != nullptr) ? svld1_f16(pg, input_beta + x) : const_0;
- // Calculate x bar
- const auto numerator = svsub_f16_z(pg, svld1_f16(pg, input_ptr + x), mean_vec);
- const auto x_bar = svmul_f16_z(pg, numerator, denominator);
- auto res = svmla_f16_z(pg, beta_vec, x_bar, gamma_vec);
+ // Calculate denominator
+ const auto tmp = svadd_f16_z(pg, var_vec, epsilon_vec);
+ auto denominator = svrsqrte_f16(tmp);
+ denominator =
+ svmul_f16_z(pg, svrsqrts_f16(svmul_f16_z(pg, tmp, denominator), denominator), denominator);
+ denominator =
+ svmul_f16_z(pg, svrsqrts_f16(svmul_f16_z(pg, tmp, denominator), denominator), denominator);
- // Perform fused activation
- if(act_info.enabled())
- {
- if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
- {
- res = svmax_f16_z(pg, const_0, res);
- }
- else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
- {
- res = svmin_f16_z(pg, va, svmax_f16_z(pg, const_0, res));
- }
- else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ // Calculate x bar
+ const auto numerator = svsub_f16_z(pg, svld1_f16(pg, input_ptr + x), mean_vec);
+ const auto x_bar = svmul_f16_z(pg, numerator, denominator);
+ auto res = svmla_f16_z(pg, beta_vec, x_bar, gamma_vec);
+
+ // Perform fused activation
+ if (act_info.enabled())
{
- res = svmin_f16_z(pg, va, svmax_f16_z(pg, vb, res));
+ if (act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+ {
+ res = svmax_f16_z(pg, const_0, res);
+ }
+ else if (act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+ {
+ res = svmin_f16_z(pg, va, svmax_f16_z(pg, const_0, res));
+ }
+ else if (act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ res = svmin_f16_z(pg, va, svmax_f16_z(pg, vb, res));
+ }
}
- }
- // Store results
- svst1_f16(pg, output_ptr + x, res);
+ // Store results
+ svst1_f16(pg, output_ptr + x, res);
- x += svcntw();
- pg = svwhilelt_b16(x, window_end_x);
- }
- while(svptest_any(svptrue_b16(), pg));
- },
- input, output);
+ x += svcntw();
+ pg = svwhilelt_b16(x, window_end_x);
+ } while (svptest_any(svptrue_b16(), pg));
+ },
+ input, output);
}
} // namespace cpu
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/batchnormalization/impl/SVE/fp32.cpp b/src/core/NEON/kernels/batchnormalization/impl/SVE/fp32.cpp
index 7cc570d8aa..df4fbfe607 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/SVE/fp32.cpp
+++ b/src/core/NEON/kernels/batchnormalization/impl/SVE/fp32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2021,2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,20 +24,29 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
+#include "arm_compute/function_info/ActivationLayerInfo.h"
+
#include "src/core/NEON/SVEMath.h"
#include <cmath>
#include <cstddef>
-#if defined(ENABLE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SVE)
#include <arm_sve.h>
namespace arm_compute
{
namespace cpu
{
-void fp32_sve_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma,
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+void fp32_sve_batch_normalization(ITensor *src,
+ ITensor *dst,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo &act_info,
+ const Window &window)
{
const auto window_start_x = static_cast<int>(window.x().start());
const auto window_end_x = static_cast<int>(window.x().end());
@@ -48,69 +57,74 @@ void fp32_sve_batch_normalization(ITensor *src, ITensor *dst, const ITensor *mea
Iterator input(src, win_collapsed);
Iterator output(dst, win_collapsed);
- const auto input_mean = reinterpret_cast<const float *>(mean->ptr_to_element(Coordinates(0, 0)));
- const auto input_var = reinterpret_cast<const float *>(var->ptr_to_element(Coordinates(0, 0)));
- const auto input_gamma = (gamma != nullptr) ? reinterpret_cast<const float *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
- const auto input_beta = (beta != nullptr) ? reinterpret_cast<const float *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_mean = reinterpret_cast<const float *>(mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const float *>(var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma =
+ (gamma != nullptr) ? reinterpret_cast<const float *>(gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta =
+ (beta != nullptr) ? reinterpret_cast<const float *>(beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
const auto epsilon_vec = svdup_n_f32(epsilon);
const auto const_1 = svdup_n_f32(1.f);
const auto const_0 = svdup_n_f32(0.f);
const auto va = svdup_n_f32(act_info.a());
const auto vb = svdup_n_f32(act_info.b());
- execute_window_loop(win_collapsed, [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
- const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
- // Compute S elements per iteration
- int x = window_start_x;
- svbool_t pg = svwhilelt_b32(x, window_end_x);
- do
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
{
- // Conctruct vectors
- const auto mean_vec = svld1_f32(pg, input_mean + x);
- const auto var_vec = svld1_f32(pg, input_var + x);
- const auto gamma_vec = (input_gamma != nullptr) ? svld1_f32(pg, input_gamma + x) : const_1;
- const auto beta_vec = (input_beta != nullptr) ? svld1_f32(pg, input_beta + x) : const_0;
+ const auto input_ptr = reinterpret_cast<const float *>(input.ptr());
+ const auto output_ptr = reinterpret_cast<float *>(output.ptr());
- // Calculate denominator
- const auto tmp = svadd_f32_z(pg, var_vec, epsilon_vec);
- auto denominator = svrsqrte_f32(tmp);
- denominator = svmul_f32_z(pg, svrsqrts_f32(svmul_f32_z(pg, tmp, denominator), denominator), denominator);
- denominator = svmul_f32_z(pg, svrsqrts_f32(svmul_f32_z(pg, tmp, denominator), denominator), denominator);
+ // Compute S elements per iteration
+ int x = window_start_x;
+ svbool_t pg = svwhilelt_b32(x, window_end_x);
+ do
+ {
+ // Construct vectors
+ const auto mean_vec = svld1_f32(pg, input_mean + x);
+ const auto var_vec = svld1_f32(pg, input_var + x);
+ const auto gamma_vec = (input_gamma != nullptr) ? svld1_f32(pg, input_gamma + x) : const_1;
+ const auto beta_vec = (input_beta != nullptr) ? svld1_f32(pg, input_beta + x) : const_0;
- // Calculate x bar
- const auto numerator = svsub_f32_z(pg, svld1_f32(pg, input_ptr + x), mean_vec);
- const auto x_bar = svmul_f32_z(pg, numerator, denominator);
- auto res = svmla_f32_z(pg, beta_vec, x_bar, gamma_vec);
+ // Calculate denominator
+ const auto tmp = svadd_f32_z(pg, var_vec, epsilon_vec);
+ auto denominator = svrsqrte_f32(tmp);
+ denominator =
+ svmul_f32_z(pg, svrsqrts_f32(svmul_f32_z(pg, tmp, denominator), denominator), denominator);
+ denominator =
+ svmul_f32_z(pg, svrsqrts_f32(svmul_f32_z(pg, tmp, denominator), denominator), denominator);
- // Perform fused activation
- if(act_info.enabled())
- {
- if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
- {
- res = svmax_f32_z(pg, const_0, res);
- }
- else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
- {
- res = svmin_f32_z(pg, va, svmax_f32_z(pg, const_0, res));
- }
- else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ // Calculate x bar
+ const auto numerator = svsub_f32_z(pg, svld1_f32(pg, input_ptr + x), mean_vec);
+ const auto x_bar = svmul_f32_z(pg, numerator, denominator);
+ auto res = svmla_f32_z(pg, beta_vec, x_bar, gamma_vec);
+
+ // Perform fused activation
+ if (act_info.enabled())
{
- res = svmin_f32_z(pg, va, svmax_f32_z(pg, vb, res));
+ if (act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+ {
+ res = svmax_f32_z(pg, const_0, res);
+ }
+ else if (act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+ {
+ res = svmin_f32_z(pg, va, svmax_f32_z(pg, const_0, res));
+ }
+ else if (act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ res = svmin_f32_z(pg, va, svmax_f32_z(pg, vb, res));
+ }
}
- }
- // Store results
- svst1_f32(pg, output_ptr + x, res);
+ // Store results
+ svst1_f32(pg, output_ptr + x, res);
- x += svcntw();
- pg = svwhilelt_b32(x, window_end_x);
- }
- while(svptest_any(svptrue_b32(), pg));
- },
- input, output);
+ x += svcntw();
+ pg = svwhilelt_b32(x, window_end_x);
+ } while (svptest_any(svptrue_b32(), pg));
+ },
+ input, output);
}
} // namespace cpu
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/batchnormalization/impl/list.h b/src/core/NEON/kernels/batchnormalization/impl/list.h
index 8e0ea36f5a..c619788125 100644
--- a/src/core/NEON/kernels/batchnormalization/impl/list.h
+++ b/src/core/NEON/kernels/batchnormalization/impl/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,24 +21,39 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef SRC_CORE_NEON_KERNELS_BATCH_NORMALIZATION_LIST_H
-#define SRC_CORE_NEON_KERNELS_BATCH_NORMALIZATION_LIST_H
+#ifndef ACL_SRC_CORE_NEON_KERNELS_BATCHNORMALIZATION_IMPL_LIST_H
+#define ACL_SRC_CORE_NEON_KERNELS_BATCHNORMALIZATION_IMPL_LIST_H
namespace arm_compute
{
namespace cpu
{
-#define DECLARE_BATCH_NORMALIZATION_KERNEL(func_name) \
- void func_name(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, \
- float epsilon, ActivationLayerInfo &act_info, const Window &window)
+#define DECLARE_BATCH_NORMALIZATION_KERNEL(func_name) \
+ void func_name(ITensor *src, ITensor *dst, const ITensor *mean, const ITensor *var, const ITensor *beta, \
+ const ITensor *gamma, float epsilon, ActivationLayerInfo &act_info, const Window &window)
DECLARE_BATCH_NORMALIZATION_KERNEL(fp16_neon_batch_normalization);
DECLARE_BATCH_NORMALIZATION_KERNEL(fp16_sve_batch_normalization);
DECLARE_BATCH_NORMALIZATION_KERNEL(fp32_neon_batch_normalization);
DECLARE_BATCH_NORMALIZATION_KERNEL(fp32_sve_batch_normalization);
-#undef DECLARE_ACTIVATION_KERNEL
+#define DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(func_name) \
+ void func_name(const Window &window, ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, \
+ const ITensor *beta, const ITensor *gamma, float epsilon, ActivationLayerInfo act_info)
+
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp16_batch_normalization_nchw_non_fused);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp32_batch_normalization_nchw_non_fused);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp16_batch_normalization_nchw_non_fused_relu);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp16_batch_normalization_nchw_non_fused_brelu);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp16_batch_normalization_nchw_non_fused_lubrelu);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp32_batch_normalization_nchw_non_fused_relu);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp32_batch_normalization_nchw_non_fused_brelu);
+DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL(fp32_batch_normalization_nchw_non_fused_lubrelu);
+
+#undef DECLARE_BATCH_NORMALIZATION_KERNEL
+#undef DECLARE_BATCH_NORMALIZATION_NCHW_KERNEL
+
} // namespace cpu
} // namespace arm_compute
-#endif /* SRC_CORE_NEON_KERNELS_BATCH_NORMALIZATION_LIST_H */
+#endif // ACL_SRC_CORE_NEON_KERNELS_BATCHNORMALIZATION_IMPL_LIST_H