author    Pablo Marquez Tello <pablo.tello@arm.com>  2023-11-21 10:10:01 +0000
committer Pablo Marquez Tello <pablo.tello@arm.com>  2023-11-27 17:16:45 +0000
commit    8d4cdd43a74574e0f99f83f1adb1d391c0c85abe
tree      614000681778c2f390897888ce69dfdd62561799 /src/cpu
parent    835577e1477003789c392d8faab4a3bb8f4040ba
download  ComputeLibrary-8d4cdd43a74574e0f99f83f1adb1d391c0c85abe.tar.gz
BatchNorm changes to enable fp16 in armv8a multi_isa builds
* Moved the fp16 and fp32 NCHW kernels to their own files,
  src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp16.cpp and
  src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp32.cpp.
* Updated filelist.json to include the new fp16 and fp32 files.
* Moved the batch_normalization_nchw template to impl.h so it can be
  instantiated from fp16.cpp and fp32.cpp.
* Pooling layer: removed the __ARM_FEATURE_FP16_VECTOR_ARITHMETIC guard
  that prevented the FP16 kernel from executing.
* Partially resolves MLCE-1102.

Change-Id: Ia8c85e9ffb76c9e387f9ae2685e5df5e52c8dc27
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10777
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
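For context, a minimal single-file sketch of the multi_isa pattern this patch follows: fp16 code is confined to translation units that are built with fp16 vector arithmetic enabled, and kernel selection moves from the preprocessor to a runtime CPU-feature check. The has_fp16() probe below is a hypothetical stand-in for the library's CPUID-based detection, not its actual API.

#include <cstdio>

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
// In a multi_isa build this body would live in its own file (like fp16.cpp
// below), the only translation unit compiled with fp16 arithmetic enabled.
static void batch_norm_fp16() { std::puts("fp16 kernel"); }
#endif

static void batch_norm_fp32() { std::puts("fp32 kernel"); }

// Hypothetical runtime capability probe; the library derives this from CPU info.
static bool has_fp16() { return false; }

int main()
{
    // Selection happens at run time, so generic code paths no longer need an
    // #ifdef around the F16 case (as in the CpuPool2dKernel.cpp change below).
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
    if (has_fp16())
    {
        batch_norm_fp16();
        return 0;
    }
#endif
    batch_norm_fp32();
    return 0;
}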
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/kernels/CpuPool2dKernel.cpp                             2
-rw-r--r--  src/cpu/kernels/fuse_batch_normalization/generic/impl.h       118
-rw-r--r--  src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp16.cpp    96
-rw-r--r--  src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp32.cpp    93
4 files changed, 307 insertions, 2 deletions
diff --git a/src/cpu/kernels/CpuPool2dKernel.cpp b/src/cpu/kernels/CpuPool2dKernel.cpp
index 9308d860d1..2c9627bdee 100644
--- a/src/cpu/kernels/CpuPool2dKernel.cpp
+++ b/src/cpu/kernels/CpuPool2dKernel.cpp
@@ -271,11 +271,9 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *
break;
}
break;
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
case DataType::F16:
num_elems_processed_per_iteration = 1;
break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
case DataType::F32:
num_elems_processed_per_iteration = 1;
break;
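With the #ifdef removed, the F16 case is always compiled here; whether F16 actually runs is decided by runtime validation instead. A hedged sketch of that kind of gate, in the spirit of the ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED check in src/core/CPP/Validate.h (the exact includes and the CPUInfo::get().has_fp16() call are assumptions):

#include "arm_compute/core/CPP/CPPTypes.h" // CPUInfo
#include "arm_compute/core/Error.h"        // Status, ErrorCode
#include "arm_compute/core/ITensorInfo.h"

using namespace arm_compute;

// Hypothetical simplified runtime gate: reject F16 tensors when the CPU the
// binary is running on lacks fp16 support, instead of excluding the code path
// at compile time.
Status validate_f16_support(const ITensorInfo *src)
{
    if (src->data_type() == DataType::F16 && !CPUInfo::get().has_fp16())
    {
        return Status(ErrorCode::RUNTIME_ERROR, "FP16 not supported by this CPU");
    }
    return Status{};
}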
diff --git a/src/cpu/kernels/fuse_batch_normalization/generic/impl.h b/src/cpu/kernels/fuse_batch_normalization/generic/impl.h
index d807148e37..0c90abccb1 100644
--- a/src/cpu/kernels/fuse_batch_normalization/generic/impl.h
+++ b/src/cpu/kernels/fuse_batch_normalization/generic/impl.h
@@ -32,6 +32,124 @@ namespace arm_compute
{
namespace cpu
{
+template <typename T, bool fused_activation, typename F>
+void batch_normalization_nchw(const Window &window,
+ ITensor *in,
+ ITensor *out,
+ const ITensor *in_mean,
+ const ITensor *in_var,
+ const ITensor *in_beta,
+ const ITensor *in_gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ /** SIMD vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+ const int window_step_x = 16 / sizeof(T);
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win_to_use = window;
+ win_to_use.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(in, win_to_use);
+ Iterator output(out, win_to_use);
+
+ F activation_functor(act_info);
+
+ // Hold information about the current feature map we are iterating.
+ // Only compute denominator and constants once per feature map.
+ int slice = -1;
+
+ const auto input_mean = reinterpret_cast<const T *>(in_mean->ptr_to_element(Coordinates(0, 0)));
+ const auto input_var = reinterpret_cast<const T *>(in_var->ptr_to_element(Coordinates(0, 0)));
+ const auto input_gamma =
+ (in_gamma != nullptr) ? reinterpret_cast<const T *>(in_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+ const auto input_beta =
+ (in_beta != nullptr) ? reinterpret_cast<const T *>(in_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
+
+ T mean = static_cast<T>(0);
+ T var = static_cast<T>(0);
+ T gamma = static_cast<T>(1);
+ T beta = static_cast<T>(0);
+ T denominator = static_cast<T>(0);
+
+ auto mean_vec = wrapper::vdup_n(mean, ExactTagType{});
+ auto var_vec = wrapper::vdup_n(var, ExactTagType{});
+ auto gamma_vec = wrapper::vdup_n(gamma, ExactTagType{});
+ auto beta_vec = wrapper::vdup_n(beta, ExactTagType{});
+ auto denominator_vec = wrapper::vdup_n(denominator, ExactTagType{});
+ const auto epsilon_vec = wrapper::vdup_n(static_cast<T>(epsilon), ExactTagType{});
+ execute_window_loop(
+ win_to_use,
+ [&](const Coordinates &id)
+ {
+ const auto input_ptr = reinterpret_cast<const T *>(input.ptr());
+ const auto output_ptr = reinterpret_cast<T *>(output.ptr());
+
+ if (slice != id.z())
+ {
+ mean = input_mean[id.z()];
+ var = input_var[id.z()];
+ mean_vec = wrapper::vdup_n(mean, ExactTagType{});
+ var_vec = wrapper::vdup_n(var, ExactTagType{});
+ if (input_gamma != nullptr)
+ {
+ gamma = input_gamma[id.z()];
+ gamma_vec = wrapper::vdup_n(gamma, ExactTagType{});
+ }
+ if (input_beta != nullptr)
+ {
+ beta = input_beta[id.z()];
+ beta_vec = wrapper::vdup_n(beta, ExactTagType{});
+ }
+
+ // Calculate denominator
+ denominator_vec = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));
+ denominator = wrapper::vgetlane(denominator_vec, 0);
+ slice = id.z();
+ }
+
+ // Perform core calculations using vector operations
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Calculate x bar
+ const auto numerator = wrapper::vsub(wrapper::vloadq(input_ptr + x), mean_vec);
+ const auto x_bar = wrapper::vmul(numerator, denominator_vec);
+ auto res = wrapper::vmla(beta_vec, x_bar, gamma_vec);
+
+ // Perform fused activation
+ if (fused_activation)
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ wrapper::vstore(output_ptr + x, res);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ const T numerator = input_ptr[x] - mean;
+ const T x_bar = numerator * denominator;
+ T res = beta + x_bar * gamma;
+
+ // Perform fused activation
+ if (fused_activation)
+ {
+ activation_functor(res);
+ }
+
+ // Store results
+ *(output_ptr + x) = res;
+ }
+ },
+ input, output);
+}
+
template <typename T>
void fused_batch_normalization_conv(const ITensor *conv_weights,
const ITensor *conv_bias,
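For reference, the loop bodies above implement the standard inference-time batch normalization transform per channel, y = beta + gamma * (x - mean) / sqrt(var + epsilon), with gamma defaulting to 1 and beta to 0 when those tensors are null. A scalar restatement of the vector arithmetic:

#include <cmath>

// Scalar mirror of the vector loop: vinvsqrt(var + eps), then vsub and vmul
// for x_bar, then vmla(beta, x_bar, gamma) == beta + x_bar * gamma.
template <typename T>
T batch_norm_scalar(T x, T mean, T var, T gamma, T beta, float epsilon)
{
    const T denominator = static_cast<T>(1) / std::sqrt(var + static_cast<T>(epsilon));
    const T x_bar       = (x - mean) * denominator;
    return beta + x_bar * gamma;
}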
diff --git a/src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp16.cpp b/src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp16.cpp
new file mode 100644
index 0000000000..ae4c7e5736
--- /dev/null
+++ b/src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp16.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/CpuTypes.h"
+#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void fp16_batch_normalization_nchw_non_fused(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float16_t, false, detail::dummy<float16_t, 8>>(window, input, output, mean, var, beta,
+ gamma, epsilon, act_info);
+}
+
+void fp16_batch_normalization_nchw_non_fused_relu(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float16_t, true, detail::relu<float16_t, 8>>(window, input, output, mean, var, beta, gamma,
+ epsilon, act_info);
+}
+
+void fp16_batch_normalization_nchw_non_fused_brelu(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float16_t, true, detail::brelu<float16_t, 8>>(window, input, output, mean, var, beta,
+ gamma, epsilon, act_info);
+}
+
+void fp16_batch_normalization_nchw_non_fused_lubrelu(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float16_t, true, detail::lubrelu<float16_t, 8>>(window, input, output, mean, var, beta,
+ gamma, epsilon, act_info);
+}
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
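The four entry points above differ only in the activation functor baked into the template instantiation. A hypothetical selection helper showing how a caller might pick among them (the enum and select_fp16_nchw are illustrative, not the library's actual registration mechanism, and assume the declarations above are in scope):

// Hypothetical: picks one of the four fp16 NCHW entry points by activation.
enum class BnActivation { None, Relu, BoundedRelu, LuBoundedRelu };

using BnNchwFn = void (*)(const Window &, ITensor *, ITensor *, const ITensor *,
                          const ITensor *, const ITensor *, const ITensor *,
                          float, ActivationLayerInfo);

BnNchwFn select_fp16_nchw(BnActivation act)
{
    switch (act)
    {
        case BnActivation::Relu:
            return &fp16_batch_normalization_nchw_non_fused_relu;
        case BnActivation::BoundedRelu:
            return &fp16_batch_normalization_nchw_non_fused_brelu;
        case BnActivation::LuBoundedRelu:
            return &fp16_batch_normalization_nchw_non_fused_lubrelu;
        default:
            return &fp16_batch_normalization_nchw_non_fused;
    }
}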
diff --git a/src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp32.cpp b/src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp32.cpp
new file mode 100644
index 0000000000..ae2db1ac66
--- /dev/null
+++ b/src/cpu/kernels/fuse_batch_normalization/nchw/neon/fp32.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/CpuTypes.h"
+#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void fp32_batch_normalization_nchw_non_fused(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float, false, detail::dummy<float, 4>>(window, input, output, mean, var, beta, gamma,
+ epsilon, act_info);
+}
+
+void fp32_batch_normalization_nchw_non_fused_relu(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float, true, detail::relu<float, 4>>(window, input, output, mean, var, beta, gamma,
+ epsilon, act_info);
+}
+
+void fp32_batch_normalization_nchw_non_fused_brelu(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float, true, detail::brelu<float, 4>>(window, input, output, mean, var, beta, gamma,
+ epsilon, act_info);
+}
+
+void fp32_batch_normalization_nchw_non_fused_lubrelu(const Window &window,
+ ITensor *input,
+ ITensor *output,
+ const ITensor *mean,
+ const ITensor *var,
+ const ITensor *beta,
+ const ITensor *gamma,
+ float epsilon,
+ ActivationLayerInfo act_info)
+{
+ batch_normalization_nchw<float, true, detail::lubrelu<float, 4>>(window, input, output, mean, var, beta, gamma,
+ epsilon, act_info);
+}
+} // namespace cpu
+} // namespace arm_compute