author    Pablo Marquez Tello <pablo.tello@arm.com>  2023-10-13 10:03:58 +0100
committer Pablo Marquez Tello <pablo.tello@arm.com>  2023-10-20 10:59:09 +0000
commit    074b985f3855193bb47fb4055abb6b12f09f48d7 (patch)
tree      0c2308f1015d25f0b05beb07aec2497bcfb2be67 /src/cpu/kernels/fuse_batch_normalization/generic/impl.h
parent    0fa92b849fd4892a341a3cda5e2ff9092093f841 (diff)
FuseBatchNorm changes to enable fp16 in armv8a multi_isa builds
* FP16 kernels must be instantiated in fp16.cpp.
* Partially resolves MLCE-1102

Change-Id: Ie652203876a0ac12b025e96d20990b6efb21e772
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10477
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
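In an armv8-a multi_isa build, only the dedicated fp16 translation unit is compiled with fp16 architecture flags, so any use of float16_t in generic code would fail to compile for the baseline target; keeping the explicit instantiations in fp16.cpp is what makes the template header below safe to include everywhere. A minimal sketch of what such an instantiation looks like follows; the guard macros and the _f16 wrapper name are assumptions modelled on the pattern used elsewhere in the library, not the verbatim contents of this commit's fp16.cpp:

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)

#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h"

namespace arm_compute
{
namespace cpu
{
// The template is instantiated here, in a translation unit that is always
// built with fp16 support, rather than in code compiled for baseline armv8-a.
void fused_batch_normalization_dwc_nchw_f16(const ITensor *dwc_weights,
                                            const ITensor *dwc_bias,
                                            ITensor       *fused_weights,
                                            ITensor       *fused_bias,
                                            const ITensor *bn_mean,
                                            const ITensor *bn_var,
                                            const ITensor *bn_beta,
                                            const ITensor *bn_gamma,
                                            float          epsilon,
                                            const Window  &window)
{
    fused_batch_normalization_dwc_nchw<float16_t>(dwc_weights, dwc_bias, fused_weights, fused_bias, bn_mean,
                                                  bn_var, bn_beta, bn_gamma, epsilon, window);
}
} // namespace cpu
} // namespace arm_compute

#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)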
Diffstat (limited to 'src/cpu/kernels/fuse_batch_normalization/generic/impl.h')
-rw-r--r-- src/cpu/kernels/fuse_batch_normalization/generic/impl.h | 120
1 file changed, 117 insertions(+), 3 deletions(-)
diff --git a/src/cpu/kernels/fuse_batch_normalization/generic/impl.h b/src/cpu/kernels/fuse_batch_normalization/generic/impl.h
index 6fa843263a..d807148e37 100644
--- a/src/cpu/kernels/fuse_batch_normalization/generic/impl.h
+++ b/src/cpu/kernels/fuse_batch_normalization/generic/impl.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H
-#define SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H
+#ifndef ACL_SRC_CPU_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H
+#define ACL_SRC_CPU_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H
#include "arm_compute/core/Helpers.h"
@@ -144,6 +144,120 @@ void fused_batch_normalization_conv(const ITensor *conv_weights,
},
conv_w_in, conv_w_out);
}
+template <typename T>
+void fused_batch_normalization_dwc_nchw(const ITensor *dwc_weights,
+ const ITensor *dwc_bias,
+ ITensor *fused_weights,
+ ITensor *fused_bias,
+ const ITensor *bn_mean,
+ const ITensor *bn_var,
+ const ITensor *bn_beta,
+ const ITensor *bn_gamma,
+ float epsilon,
+ const Window &window)
+{
+ using ScalarType = T;
+ const int size = 16 / dwc_weights->info()->element_size();
+ using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+ const bool run_in_place_weights = (fused_weights == nullptr) || (fused_weights == dwc_weights);
+ const bool run_in_place_bias = (fused_bias == nullptr) || (dwc_bias != nullptr && fused_bias == dwc_bias);
+
+    // Collapse the x dimension of the window; x is stepped through manually below
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ const int window_step_x = size;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Iterator dwc_w_in(dwc_weights, win);
+ Iterator dwc_w_out(run_in_place_weights ? dwc_weights : fused_weights, win);
+
+ const auto dwc_bias_in =
+ (dwc_bias != nullptr ? reinterpret_cast<ScalarType *>(dwc_bias->ptr_to_element(Coordinates(0, 0))) : nullptr);
+ auto dwc_bias_out =
+ (run_in_place_bias ? dwc_bias_in
+ : reinterpret_cast<ScalarType *>(fused_bias->ptr_to_element(Coordinates(0, 0))));
+
+ const auto input_mean = reinterpret_cast<const ScalarType *>(bn_mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const ScalarType *>(bn_var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma = (bn_gamma != nullptr)
+ ? reinterpret_cast<const ScalarType *>(bn_gamma->ptr_to_element(Coordinates(0, 0)))
+ : nullptr;
+ const auto input_beta = (bn_beta != nullptr)
+ ? reinterpret_cast<const ScalarType *>(bn_beta->ptr_to_element(Coordinates(0, 0)))
+ : nullptr;
+
+ auto mean_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{});
+ auto var_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{});
+ auto gamma_vec = wrapper::vdup_n(ScalarType(1), ExactTagType{});
+ auto beta_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{});
+ auto rvar_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{});
+ const auto epsilon_vec = wrapper::vdup_n(ScalarType(epsilon), ExactTagType{});
+
+ auto mean = ScalarType(0.0);
+ auto var = ScalarType(0.0);
+ auto gamma = ScalarType(1.0);
+ auto beta = ScalarType(0.0);
+ auto dwc_bias_in_scalar = ScalarType(0.0);
+ execute_window_loop(
+ win,
+ [&](const Coordinates &id)
+ {
+ var = input_var[id[2]];
+ if (input_gamma != nullptr)
+ {
+ gamma = input_gamma[id[2]];
+ }
+
+ if (id[1] == 0)
+ {
+ mean = input_mean[id[2]];
+
+ // Construct vectors
+ mean_vec = wrapper::vdup_n(mean, ExactTagType{});
+ if (input_beta != nullptr)
+ {
+ beta = input_beta[id[2]];
+ beta_vec = wrapper::vdup_n(beta, ExactTagType{});
+ }
+
+ if (dwc_bias_in != nullptr)
+ {
+ dwc_bias_in_scalar = dwc_bias_in[id[2]];
+ }
+
+ auto dwc_bias_tmp_scalar = (dwc_bias_in_scalar - mean) / std::sqrt(var + ScalarType(epsilon));
+ dwc_bias_out[id[2]] = (dwc_bias_tmp_scalar * gamma) + beta;
+ }
+
+ int x = window_start_x;
+ auto dwc_w_in_ptr = reinterpret_cast<const ScalarType *>(dwc_w_in.ptr());
+ auto dwc_w_out_ptr = reinterpret_cast<ScalarType *>(dwc_w_out.ptr());
+ var_vec = wrapper::vdup_n(var, ExactTagType{});
+ gamma_vec = wrapper::vdup_n(gamma, ExactTagType{});
+ rvar_vec = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
+
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ auto wn = wrapper::vloadq(dwc_w_in_ptr + x);
+ wn = wrapper::vmul(wn, rvar_vec);
+ wn = wrapper::vmul(wn, gamma_vec);
+
+ // Store results
+ wrapper::vstore(dwc_w_out_ptr + x, wn);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ *(dwc_w_out_ptr + x) = *(dwc_w_in_ptr + x) / std::sqrt(var + ScalarType(epsilon)) * gamma;
+ }
+ },
+ dwc_w_in, dwc_w_out);
+}
+
} // namespace cpu
} // namespace arm_compute
-#endif //SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H
+#endif // ACL_SRC_CPU_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H
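For reference, the kernel added above folds the batch-normalization statistics into the depthwise convolution weights and bias. Stripped of the NEON wrapper intrinsics, the per-channel arithmetic reduces to the following standalone sketch (a hypothetical helper for illustration, not part of the patch):

#include <cmath>

// Per-channel batch-norm folding, matching the vector loop above:
//   fused weight w' = w / sqrt(var + eps) * gamma
//   fused bias   b' = (b - mean) / sqrt(var + eps) * gamma + beta
struct FusedParams
{
    float weight;
    float bias;
};

FusedParams fuse_bn_channel(float w, float b, float mean, float var, float gamma, float beta, float epsilon)
{
    const float rstd = 1.0f / std::sqrt(var + epsilon); // rvar_vec in the kernel
    return {w * rstd * gamma,                           // main and left-over weight loops
            (b - mean) * rstd * gamma + beta};          // the id[1] == 0 bias branch
}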