From 41a729edf9facc6e901055e0cc84219f75670475 Mon Sep 17 00:00:00 2001 From: Yair Schwarzbaum Date: Mon, 15 Nov 2021 20:42:47 +0200 Subject: Decouple fuseBatchNormalizationKernel - Decouple data type for CPU implementation supported data types are: fp32, fp16 Resolves COMPMID-4613 Signed-off-by: Yair Schwarzbaum Change-Id: I8aff3ba2d446f64e4d182a866e3a3debc9ef613b Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7175 Reviewed-by: Giorgio Arena Tested-by: Arm Jenkins Comments-Addressed: Arm Jenkins --- .../fuse_batch_normalization/generic/fp16.cpp | 40 ++++++ .../fuse_batch_normalization/generic/fp32.cpp | 38 +++++ .../fuse_batch_normalization/generic/impl.cpp | 135 ++++++++++++++++++ .../fuse_batch_normalization/generic/impl.h | 39 +++++ src/cpu/kernels/fuse_batch_normalization/list.h | 56 ++++++++ .../kernels/fuse_batch_normalization/nchw/all.cpp | 145 +++++++++++++++++++ .../fuse_batch_normalization/nhwc/neon/fp16.cpp | 42 ++++++ .../fuse_batch_normalization/nhwc/neon/fp32.cpp | 40 ++++++ .../fuse_batch_normalization/nhwc/neon/impl.cpp | 158 +++++++++++++++++++++ .../fuse_batch_normalization/nhwc/neon/impl.h | 40 ++++++ 10 files changed, 733 insertions(+) create mode 100644 src/cpu/kernels/fuse_batch_normalization/generic/fp16.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/generic/fp32.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/generic/impl.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/generic/impl.h create mode 100644 src/cpu/kernels/fuse_batch_normalization/list.h create mode 100644 src/cpu/kernels/fuse_batch_normalization/nchw/all.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp16.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp32.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.cpp create mode 100644 src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h (limited to 'src/cpu') diff --git a/src/cpu/kernels/fuse_batch_normalization/generic/fp16.cpp b/src/cpu/kernels/fuse_batch_normalization/generic/fp16.cpp new file mode 100644 index 0000000000..a29ee762fc --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/generic/fp16.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) + +#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +void fused_batch_normalization_conv_f16(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + return fused_batch_normalization_conv(conv_weights, conv_bias, fused_weights, fused_bias, + bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window); +} +} // namespace cpu +} // namespace arm_compute +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ diff --git a/src/cpu/kernels/fuse_batch_normalization/generic/fp32.cpp b/src/cpu/kernels/fuse_batch_normalization/generic/fp32.cpp new file mode 100644 index 0000000000..076e97651d --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/generic/fp32.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +void fused_batch_normalization_conv_f32(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + return fused_batch_normalization_conv(conv_weights, conv_bias, fused_weights, fused_bias, + bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window); +} +} // namespace cpu +} // namespace arm_compute diff --git a/src/cpu/kernels/fuse_batch_normalization/generic/impl.cpp b/src/cpu/kernels/fuse_batch_normalization/generic/impl.cpp new file mode 100644 index 0000000000..3c6a2069ee --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/generic/impl.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2018-2022 Arm Limited. 
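// What fused_batch_normalization_conv computes, per output channel c of the
// convolution: W_fused[c] = W[c] * gamma[c] / sqrt(var[c] + eps) and
// b_fused[c] = (b[c] - mean[c]) * gamma[c] / sqrt(var[c] + eps) + beta[c].
// A minimal scalar sketch of that arithmetic (plain types instead of
// ITensor/Window; illustrative only, not the library's API):
#include <cmath>
#include <vector>

void fuse_conv_bn_channel(std::vector<float> &weights, float &bias,
                          float mean, float var, float gamma, float beta, float epsilon)
{
    const float scale = gamma / std::sqrt(var + epsilon); // gamma / sqrt(var + eps), computed once per channel
    for(float &w : weights)
    {
        w *= scale;                      // W' = W * gamma / sqrt(var + eps)
    }
    bias = (bias - mean) * scale + beta; // b' = (b - mean) * gamma / sqrt(var + eps) + beta
}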
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +template +void fused_batch_normalization_conv(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + using ScalarType = T; + const int size = 16 / conv_weights->info()->element_size(); + using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t; + + const bool run_in_place_weights = (fused_weights == nullptr) || (fused_weights == conv_weights); + const bool run_in_place_bias = (fused_bias == nullptr) || (conv_bias != nullptr && fused_bias == conv_bias); + + // Set build options + Window win = window; + win.set(Window::DimX, Window::Dimension(0, 1, 1)); + + const int window_step_x = size; + const auto window_start_x = static_cast(window.x().start()); + const auto window_end_x = static_cast(window.x().end()); + + Iterator conv_w_in(conv_weights, win); + Iterator conv_w_out(run_in_place_weights ? conv_weights : fused_weights, win); + + const auto conv_bias_in = (conv_bias != nullptr ? reinterpret_cast(conv_bias->ptr_to_element(Coordinates(0, 0))) : nullptr); + auto conv_bias_out = (run_in_place_bias ? conv_bias_in : reinterpret_cast(fused_bias->ptr_to_element(Coordinates(0, 0)))); + + const auto input_mean = reinterpret_cast(bn_mean->ptr_to_element(Coordinates(0, 0))); + const auto input_var = reinterpret_cast(bn_var->ptr_to_element(Coordinates(0, 0))); + const auto input_gamma = (bn_gamma != nullptr) ? reinterpret_cast(bn_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr; + const auto input_beta = (bn_beta != nullptr) ? 
reinterpret_cast(bn_beta->ptr_to_element(Coordinates(0, 0))) : nullptr; + + auto mean_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto var_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto gamma_vec = wrapper::vdup_n(ScalarType(1), ExactTagType{}); + auto beta_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto rvar_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + const auto epsilon_vec = wrapper::vdup_n(ScalarType(epsilon), ExactTagType{}); + + auto mean = ScalarType(0.0); + auto var = ScalarType(0.0); + auto gamma = ScalarType(1.0); + auto beta = ScalarType(0.0); + auto conv_bias_in_scalar = ScalarType(0.0); + execute_window_loop(win, [&](const Coordinates & id) + { + var = input_var[id[3]]; + if(input_gamma != nullptr) + { + gamma = input_gamma[id[3]]; + } + + if((id[0] == 0) && (id[1] == 0) && (id[2] == 0)) + { + if(input_beta != nullptr) + { + beta = input_beta[id[3]]; + beta_vec = wrapper::vdup_n(beta, ExactTagType{}); + } + + // Construct vectors + mean = input_mean[id[3]]; + mean_vec = wrapper::vdup_n(mean, ExactTagType{}); + + if(conv_bias_in != nullptr) + { + conv_bias_in_scalar = conv_bias_in[id[3]]; + } + auto conv_bias_tmp_scalar = (conv_bias_in_scalar - mean) / std::sqrt(var + ScalarType(epsilon)); + conv_bias_out[id[3]] = (conv_bias_tmp_scalar * gamma) + beta; + } + + int x = window_start_x; + auto conv_w_in_ptr = reinterpret_cast(conv_w_in.ptr()); + auto conv_w_out_ptr = reinterpret_cast(conv_w_out.ptr()); + var_vec = wrapper::vdup_n(var, ExactTagType{}); + gamma_vec = wrapper::vdup_n(gamma, ExactTagType{}); + rvar_vec = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec)); + + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + auto wn = wrapper::vloadq(conv_w_in_ptr + x); + wn = wrapper::vmul(wn, rvar_vec); + wn = wrapper::vmul(wn, gamma_vec); + + // Store results + wrapper::vstore(conv_w_out_ptr + x, wn); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(conv_w_out_ptr + x) = *(conv_w_in_ptr + x) / std::sqrt(var + ScalarType(epsilon)) * gamma; + } + }, + conv_w_in, conv_w_out); +} + +template void fused_batch_normalization_conv(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window); + +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) +template void fused_batch_normalization_conv(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window); +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ + +} // namespace cpu +} // namespace arm_compute diff --git a/src/cpu/kernels/fuse_batch_normalization/generic/impl.h b/src/cpu/kernels/fuse_batch_normalization/generic/impl.h new file mode 100644 index 0000000000..979ea13842 --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/generic/impl.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. 
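// The main loop in generic/impl.cpp above advances in steps of
// window_step_x = 16 / element_size lanes (four fp32 or eight fp16 values per
// 128-bit vector) and finishes each row with a scalar "left-over" loop.
// The same idiom, reduced to plain arrays and fp32 NEON intrinsics instead of
// the wrapper:: layer (a minimal sketch, not the patch's code):
#include <arm_neon.h>
#include <cmath>

void scale_weights_f32(float *w, int n, float var, float gamma, float eps)
{
    const float       scale     = gamma / std::sqrt(var + eps); // rvar * gamma, fixed per channel
    const float32x4_t scale_vec = vdupq_n_f32(scale);

    int x = 0;
    for(; x <= n - 4; x += 4) // vector main loop, 4 lanes per iteration
    {
        vst1q_f32(w + x, vmulq_f32(vld1q_f32(w + x), scale_vec));
    }
    for(; x < n; ++x)         // scalar left-over elements
    {
        w[x] *= scale;
    }
}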
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H +#define SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H + +#include "arm_compute/core/Helpers.h" +#include "src/core/NEON/wrapper/wrapper.h" + +namespace arm_compute +{ +namespace cpu +{ +template +void fused_batch_normalization_conv(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window); +} +} +#endif //SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_GENERIC_IMPL_H \ No newline at end of file diff --git a/src/cpu/kernels/fuse_batch_normalization/list.h b/src/cpu/kernels/fuse_batch_normalization/list.h new file mode 100644 index 0000000000..e25b1e5fed --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/list.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
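// Reduced sketch of the pattern used above (illustrative names, not part of the
// patch): generic/impl.h declares the template, generic/impl.cpp defines it and
// explicitly instantiates it for float and, when fp16 kernels are built,
// float16_t, and the thin fp32.cpp/fp16.cpp files wrap those instantiations
// behind the non-template symbols that list.h declares.
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
#include <arm_neon.h> // provides float16_t
#endif

// Role of impl.h: declaration only.
template <typename T>
void scale_buffer(T *data, int n, T factor);

// Role of impl.cpp: the definition ...
template <typename T>
void scale_buffer(T *data, int n, T factor)
{
    for(int i = 0; i < n; ++i)
    {
        data[i] *= factor;
    }
}

// ... plus explicit instantiations, so no other translation unit re-compiles the body.
template void scale_buffer<float>(float *, int, float);
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
template void scale_buffer<float16_t>(float16_t *, int, float16_t);
#endif

// Role of fp32.cpp: a thin, non-template entry point (what list.h exposes).
void scale_buffer_f32(float *data, int n, float factor)
{
    scale_buffer<float>(data, n, factor);
}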
+ */ +#ifndef SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_LIST_H +#define SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_LIST_H + +namespace arm_compute +{ +namespace cpu +{ +#define DECLARE_FUSE_BATCH_NORMALIZE_CONV_KERNEL(func_name) \ + void func_name(const ITensor *conv_weights, const ITensor *conv_bias, ITensor *fused_weights, ITensor *fused_bias, \ + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) + +#define DECLARE_FUSE_BATCH_NORMALIZE_DWC_NCHW_CONV_KERNEL(func_name) \ + void func_name(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, \ + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) + +#define DECLARE_FUSE_BATCH_NORMALIZE_DWC_NHWC_CONV_KERNEL(func_name) \ + void func_name(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, \ + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) + +DECLARE_FUSE_BATCH_NORMALIZE_CONV_KERNEL(fused_batch_normalization_conv_f16); +DECLARE_FUSE_BATCH_NORMALIZE_CONV_KERNEL(fused_batch_normalization_conv_f32); +DECLARE_FUSE_BATCH_NORMALIZE_DWC_NHWC_CONV_KERNEL(fused_batch_normalization_dwc_nhwc_f16); +DECLARE_FUSE_BATCH_NORMALIZE_DWC_NHWC_CONV_KERNEL(fused_batch_normalization_dwc_nhwc_f32); +DECLARE_FUSE_BATCH_NORMALIZE_DWC_NCHW_CONV_KERNEL(fused_batch_normalization_dwc_nchw_f16); +DECLARE_FUSE_BATCH_NORMALIZE_DWC_NCHW_CONV_KERNEL(fused_batch_normalization_dwc_nchw_f32); + +#undef DECLARE_FUSE_BATCH_NORMALIZE_CONV_KERNEL +#undef DECLARE_FUSE_BATCH_NORMALIZE_DWC_NCHW_CONV_KERNEL +#undef DECLARE_FUSE_BATCH_NORMALIZE_DWC_NHWC_CONV_KERNEL +} +} + +#endif // \ No newline at end of file diff --git a/src/cpu/kernels/fuse_batch_normalization/nchw/all.cpp b/src/cpu/kernels/fuse_batch_normalization/nchw/all.cpp new file mode 100644 index 0000000000..1e3be8792d --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/nchw/all.cpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2018-2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
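// Hypothetical example of how a caller might bind one of the entry points
// declared in list.h per data type. The real selection logic lives in the
// fuse-batch-normalization kernel, which is not part of this patch; the
// selector name below is an assumption for illustration only.
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"
#include "src/cpu/kernels/fuse_batch_normalization/list.h"

namespace arm_compute
{
// Function-pointer type matching the DECLARE_FUSE_BATCH_NORMALIZE_CONV_KERNEL signature.
using FuseBnConvFn = void (*)(const ITensor *, const ITensor *, ITensor *, ITensor *,
                              const ITensor *, const ITensor *, const ITensor *, const ITensor *,
                              float, const Window &);

inline FuseBnConvFn select_fuse_bn_conv_kernel(DataType dt)
{
    switch(dt)
    {
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
        case DataType::F16:
            return &cpu::fused_batch_normalization_conv_f16;
#endif
        case DataType::F32:
            return &cpu::fused_batch_normalization_conv_f32;
        default:
            return nullptr;
    }
}
} // namespace arm_compute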
+ */ + +#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +template +void fused_batch_normalization_dwc_nchw(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + using ScalarType = T; + const int size = 16 / dwc_weights->info()->element_size(); + using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t; + + const bool run_in_place_weights = (fused_weights == nullptr) || (fused_weights == dwc_weights); + const bool run_in_place_bias = (fused_bias == nullptr) || (dwc_bias != nullptr && fused_bias == dwc_bias); + + // Set build options + Window win = window; + win.set(Window::DimX, Window::Dimension(0, 1, 1)); + + const int window_step_x = size; + const auto window_start_x = static_cast(window.x().start()); + const auto window_end_x = static_cast(window.x().end()); + + Iterator dwc_w_in(dwc_weights, win); + Iterator dwc_w_out(run_in_place_weights ? dwc_weights : fused_weights, win); + + const auto dwc_bias_in = (dwc_bias != nullptr ? reinterpret_cast(dwc_bias->ptr_to_element(Coordinates(0, 0))) : nullptr); + auto dwc_bias_out = (run_in_place_bias ? dwc_bias_in : reinterpret_cast(fused_bias->ptr_to_element(Coordinates(0, 0)))); + + const auto input_mean = reinterpret_cast(bn_mean->ptr_to_element(Coordinates(0, 0))); + const auto input_var = reinterpret_cast(bn_var->ptr_to_element(Coordinates(0, 0))); + const auto input_gamma = (bn_gamma != nullptr) ? reinterpret_cast(bn_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr; + const auto input_beta = (bn_beta != nullptr) ? reinterpret_cast(bn_beta->ptr_to_element(Coordinates(0, 0))) : nullptr; + + auto mean_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto var_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto gamma_vec = wrapper::vdup_n(ScalarType(1), ExactTagType{}); + auto beta_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto rvar_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + const auto epsilon_vec = wrapper::vdup_n(ScalarType(epsilon), ExactTagType{}); + + auto mean = ScalarType(0.0); + auto var = ScalarType(0.0); + auto gamma = ScalarType(1.0); + auto beta = ScalarType(0.0); + auto dwc_bias_in_scalar = ScalarType(0.0); + execute_window_loop(win, [&](const Coordinates & id) + { + var = input_var[id[2]]; + if(input_gamma != nullptr) + { + gamma = input_gamma[id[2]]; + } + + if(id[1] == 0) + { + mean = input_mean[id[2]]; + + // Construct vectors + mean_vec = wrapper::vdup_n(mean, ExactTagType{}); + if(input_beta != nullptr) + { + beta = input_beta[id[2]]; + beta_vec = wrapper::vdup_n(beta, ExactTagType{}); + } + + if(dwc_bias_in != nullptr) + { + dwc_bias_in_scalar = dwc_bias_in[id[2]]; + } + + auto dwc_bias_tmp_scalar = (dwc_bias_in_scalar - mean) / std::sqrt(var + ScalarType(epsilon)); + dwc_bias_out[id[2]] = (dwc_bias_tmp_scalar * gamma) + beta; + } + + int x = window_start_x; + auto dwc_w_in_ptr = reinterpret_cast(dwc_w_in.ptr()); + auto dwc_w_out_ptr = reinterpret_cast(dwc_w_out.ptr()); + var_vec = wrapper::vdup_n(var, ExactTagType{}); + gamma_vec = wrapper::vdup_n(gamma, ExactTagType{}); + rvar_vec = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec)); + + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + auto wn = wrapper::vloadq(dwc_w_in_ptr + x); + wn = wrapper::vmul(wn, rvar_vec); + wn = wrapper::vmul(wn, gamma_vec); 
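            // At this point wn holds W * gamma / sqrt(var + eps) for one vector of
            // weights of the current channel (rvar_vec is 1 / sqrt(var + eps));
            // the scalar path above already produced the fused bias
            // b' = (b - mean) * gamma / sqrt(var + eps) + beta for that channel.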
+ + // Store results + wrapper::vstore(dwc_w_out_ptr + x, wn); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + *(dwc_w_out_ptr + x) = *(dwc_w_in_ptr + x) / std::sqrt(var + ScalarType(epsilon)) * gamma; + } + }, + dwc_w_in, dwc_w_out); +} + +void fused_batch_normalization_dwc_nchw_f32(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + return fused_batch_normalization_dwc_nchw(dwc_weights, dwc_bias, fused_weights, fused_bias, + bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window); +} + +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) +void fused_batch_normalization_dwc_nchw_f16(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + return fused_batch_normalization_dwc_nchw(dwc_weights, dwc_bias, fused_weights, fused_bias, + bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window); +} +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ + +} // namespace cpu +} // namespace arm_compute diff --git a/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp16.cpp b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp16.cpp new file mode 100644 index 0000000000..275211ff38 --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp16.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
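// Both f16 entry points sit behind the same pair of guards:
// __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is defined by the compiler when the
// target supports fp16 vector arithmetic (for example with an armv8.2-a+fp16
// -march setting; the exact flags depend on the toolchain), while
// ENABLE_FP16_KERNELS is the library's own build option, so the f16 symbols are
// only emitted when both hold. A minimal illustration of the pattern:
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
#include <arm_neon.h>

// Compiled only when the toolchain can generate fp16 vector code *and* the
// build was configured with fp16 kernels enabled.
float16_t halve_f16(float16_t v)
{
    return vgetq_lane_f16(vmulq_n_f16(vdupq_n_f16(v), (float16_t)0.5f), 0);
}
#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */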
+ */ +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) + +#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h" +#include "src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +void fused_batch_normalization_dwc_nhwc_f16(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + return fused_batch_normalization_dwc_nhwc(dwc_weights, dwc_bias, fused_weights, fused_bias, + bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window); +} + +} // namespace cpu +} // namespace arm_compute +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ diff --git a/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp32.cpp b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp32.cpp new file mode 100644 index 0000000000..67169c5325 --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/fp32.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021-2022 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h" +#include "src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +void fused_batch_normalization_dwc_nhwc_f32(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + return fused_batch_normalization_dwc_nhwc(dwc_weights, dwc_bias, fused_weights, fused_bias, + bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window); +} + +} // namespace cpu +} // namespace arm_compute diff --git a/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.cpp b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.cpp new file mode 100644 index 0000000000..e33af4ebbe --- /dev/null +++ b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2018-2022 Arm Limited. 
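// Layout difference between the two depthwise variants: in NCHW (nchw/all.cpp,
// above) one window row belongs to a single channel, so mean/var/gamma/beta are
// scalars broadcast across the row; in NHWC (nhwc/neon/impl.cpp, below) channels
// run along the innermost x dimension, so the statistics are indexed with the
// same x as the weights and are loaded as whole vectors in the main loop.
// A simplified sketch with plain arrays standing in for the Iterator/Window machinery:
#include <cmath>

// NCHW row: one channel -> scalar statistics, reused for every element.
void scale_row_nchw(float *w, int n, const float *var, const float *gamma, int channel, float eps)
{
    const float scale = gamma[channel] / std::sqrt(var[channel] + eps);
    for(int x = 0; x < n; ++x)
    {
        w[x] *= scale;
    }
}

// NHWC row: a different channel per element -> statistics indexed by x.
void scale_row_nhwc(float *w, int n, const float *var, const float *gamma, float eps)
{
    for(int x = 0; x < n; ++x)
    {
        w[x] *= gamma[x] / std::sqrt(var[x] + eps);
    }
}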
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h" + +namespace arm_compute +{ +namespace cpu +{ +template +void fused_batch_normalization_dwc_nhwc(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window) +{ + using ScalarType = T; + const int size = 16 / dwc_weights->info()->element_size(); + using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t; + + const bool run_in_place_weights = (fused_weights == nullptr) || (fused_weights == dwc_weights); + const bool run_in_place_bias = (fused_bias == nullptr) || (dwc_bias != nullptr && fused_bias == dwc_bias); + + // Set build options + Window win = window; + win.set(Window::DimX, Window::Dimension(0, 1, 1)); + + const int window_step_x = size; + const auto window_start_x = static_cast(window.x().start()); + const auto window_end_x = static_cast(window.x().end()); + + Iterator dwc_w_in(dwc_weights, win); + Iterator dwc_w_out(run_in_place_weights ? dwc_weights : fused_weights, win); + + const auto dwc_bias_in = (dwc_bias != nullptr ? reinterpret_cast(dwc_bias->ptr_to_element(Coordinates(0, 0))) : nullptr); + auto dwc_bias_out = (run_in_place_bias ? dwc_bias_in : reinterpret_cast(fused_bias->ptr_to_element(Coordinates(0, 0)))); + + const auto input_mean = reinterpret_cast(bn_mean->ptr_to_element(Coordinates(0, 0))); + const auto input_var = reinterpret_cast(bn_var->ptr_to_element(Coordinates(0, 0))); + const auto input_gamma = (bn_gamma != nullptr) ? reinterpret_cast(bn_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr; + const auto input_beta = (bn_beta != nullptr) ? 
reinterpret_cast(bn_beta->ptr_to_element(Coordinates(0, 0))) : nullptr; + + auto mean_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto var_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto gamma_vec = wrapper::vdup_n(ScalarType(1), ExactTagType{}); + auto beta_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto rvar_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + auto dwc_bias_vec = wrapper::vdup_n(ScalarType(0), ExactTagType{}); + const auto epsilon_vec = wrapper::vdup_n(ScalarType(epsilon), ExactTagType{}); + + auto gamma = ScalarType(1.0); + auto beta = ScalarType(0.0); + auto dwc_bias_in_scalar = ScalarType(0); + + execute_window_loop(win, [&](const Coordinates & id) + { + int x = window_start_x; + for(; x <= (window_end_x - window_step_x); x += window_step_x) + { + var_vec = wrapper::vloadq(input_var + x); + if(input_gamma != nullptr) + { + gamma_vec = wrapper::vloadq(input_gamma + x); + } + + if((id[2] == 0) && (id[1] == 0)) + { + mean_vec = wrapper::vloadq(input_mean + x); + + // Construct vectors + if(input_beta != nullptr) + { + beta_vec = wrapper::vloadq(input_beta + x); + } + + if(dwc_bias_in != nullptr) + { + dwc_bias_vec = wrapper::vloadq(dwc_bias_in + x); + } + + auto dwc_bias_tmp_vec = wrapper::vmul(wrapper::vsub(dwc_bias_vec, mean_vec), wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec))); + dwc_bias_tmp_vec = wrapper::vadd(wrapper::vmul(dwc_bias_tmp_vec, gamma_vec), beta_vec); + wrapper::vstore(dwc_bias_out + x, dwc_bias_tmp_vec); + } + + auto dwc_w_in_ptr = reinterpret_cast(dwc_w_in.ptr()); + auto dwc_w_out_ptr = reinterpret_cast(dwc_w_out.ptr()); + + auto wn = wrapper::vloadq(dwc_w_in_ptr + x); + rvar_vec = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec)); + wn = wrapper::vmul(wn, rvar_vec); + wn = wrapper::vmul(wn, gamma_vec); + + // Store results + wrapper::vstore(dwc_w_out_ptr + x, wn); + } + + // Compute left-over elements + for(; x < window_end_x; ++x) + { + auto var = input_var[x]; + if(input_gamma != nullptr) + { + gamma = input_gamma[x]; + } + + if(id[2] == 0 && id[1] == 0) + { + auto mean = input_mean[x]; + if(input_beta != nullptr) + { + beta = input_beta[x]; + } + if(dwc_bias_in != nullptr) + { + dwc_bias_in_scalar = dwc_bias_in[x]; + } + + auto dwc_bias_tmp_scalar = (dwc_bias_in_scalar - mean) / std::sqrt(var + ScalarType(epsilon)); + dwc_bias_out[x] = (dwc_bias_tmp_scalar * gamma) + beta; + } + + const auto dwc_w_in_ptr = reinterpret_cast(dwc_w_in.ptr()); + auto dwc_w_out_ptr = reinterpret_cast(dwc_w_out.ptr()); + + *(dwc_w_out_ptr + x) = *(dwc_w_in_ptr + x) / std::sqrt(var + ScalarType(epsilon)) * gamma; + } + }, + dwc_w_in, dwc_w_out); +} + +template void fused_batch_normalization_dwc_nhwc(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window); + +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) +template void fused_batch_normalization_dwc_nhwc(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias, + const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window); +#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */ + +} // namespace cpu +} // namespace arm_compute diff --git a/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h 
b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h
new file mode 100644
index 0000000000..3b813132b1
--- /dev/null
+++ b/src/cpu/kernels/fuse_batch_normalization/nhwc/neon/impl.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_IMPL_H
+#define SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_IMPL_H
+
+#include "arm_compute/core/Helpers.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+template <typename T>
+void fused_batch_normalization_dwc_nhwc(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias,
+                                        const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window);
+
+} // namespace cpu
+} // namespace arm_compute
+#endif //SRC_CORE_NEON_KERNELS_FUSE_BATCH_NORMALIZATION_IMPL_H
--
cgit v1.2.1
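// Standalone numerical check of the fusion identity used throughout this patch:
// applying batch normalization after a convolution gives the same result as the
// convolution with fused weights and bias. Not part of the patch; plain scalars
// stand in for a 1x1 convolution.
#include <cassert>
#include <cmath>
#include <cstdio>

int main()
{
    const float w = 0.75f, b = 0.20f, x = 1.50f;                                    // conv weight, bias, input
    const float mean = 0.40f, var = 0.09f, gamma = 1.10f, beta = -0.05f, eps = 1e-5f;

    // Convolution followed by batch normalization.
    const float bn_after_conv = gamma * ((w * x + b) - mean) / std::sqrt(var + eps) + beta;

    // Convolution with the fused parameters the kernels produce.
    const float scale   = gamma / std::sqrt(var + eps);
    const float w_fused = w * scale;
    const float b_fused = (b - mean) * scale + beta;
    const float fused   = w_fused * x + b_fused;

    assert(std::fabs(bn_after_conv - fused) < 1e-6f);
    std::printf("bn(conv(x)) = %f, fused conv(x) = %f\n", bn_after_conv, fused);
    return 0;
}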