From 5e99318e4378b1f151fc85cd241adf7b222a088c Mon Sep 17 00:00:00 2001
From: Yair Schwarzbaum
Date: Mon, 10 Jan 2022 15:11:07 +0200
Subject: Decouple NEL2NormalizeLayerKernel

Resolves: COMPMID-4615

Signed-off-by: Yair Schwarzbaum
Change-Id: Iadbfb3e45831a5072962b5b9f61e8ae2e674ccc4
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7016
Reviewed-by: Giorgio Arena
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 Android.bp                                         |   3 +
 filelist.json                                      |   7 +-
 src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp | 142 ++++++++-------------
 src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp  |  45 +++++++
 src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp  |  45 +++++++
 src/cpu/kernels/l2normlayer/generic/neon/impl.cpp  | 131 +++++++++++++++++++
 src/cpu/kernels/l2normlayer/generic/neon/impl.h    |  44 +++++++
 src/cpu/kernels/l2normlayer/list.h                 |  41 ++++++
 8 files changed, 370 insertions(+), 88 deletions(-)
 create mode 100644 src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp
 create mode 100644 src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp
 create mode 100644 src/cpu/kernels/l2normlayer/generic/neon/impl.cpp
 create mode 100644 src/cpu/kernels/l2normlayer/generic/neon/impl.h
 create mode 100644 src/cpu/kernels/l2normlayer/list.h

diff --git a/Android.bp b/Android.bp
index 957c8e269c..950192c2fb 100644
--- a/Android.bp
+++ b/Android.bp
@@ -468,6 +468,9 @@ cc_library_static {
         "src/cpu/kernels/instancenorm/generic/neon/impl.cpp",
         "src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp",
         "src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp",
+        "src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp",
+        "src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp",
+        "src/cpu/kernels/l2normlayer/generic/neon/impl.cpp",
         "src/cpu/kernels/maxunpool/generic/neon/fp16.cpp",
         "src/cpu/kernels/maxunpool/generic/neon/fp32.cpp",
         "src/cpu/kernels/maxunpool/generic/neon/impl.cpp",
diff --git a/filelist.json b/filelist.json
index 88d98ae76e..6e28635411 100644
--- a/filelist.json
+++ b/filelist.json
@@ -1578,7 +1578,12 @@
         "common": [
           "src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp",
           "src/runtime/NEON/functions/NEL2NormalizeLayer.cpp"
-        ]
+        ],
+        "neon":{
+          "common":["src/cpu/kernels/l2normlayer/generic/neon/impl.cpp"],
+          "fp32":["src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp"],
+          "fp16":["src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp"]
+        }
       }
     },
     "Logical": {
diff --git a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
index 9bda82d416..8ab0288ab1 100644
--- a/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEL2NormalizeLayerKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,11 +30,13 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
+#include "src/common/cpuinfo/CpuIsaInfo.h"
 #include "src/core/NEON/NEMath.h"
+#include "src/core/common/Registrars.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/l2normlayer/list.h"
 
-#include "src/core/NEON/wrapper/wrapper.h"
 #include <arm_neon.h>
 #include <cmath>
@@ -44,90 +46,64 @@
 namespace
 {
 constexpr int max_input_tensor_dim = 3;
 
-template <typename T, int S>
-void l2_normalize_X(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window)
+struct L2NormalizeLayerSelectorData
 {
-    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+    DataType            dt;
+    unsigned int        actual_axis;
+    cpuinfo::CpuIsaInfo isa;
+};
 
-    const int  window_step_x  = 16 / data_size_from_type(in->info()->data_type());
-    const auto window_start_x = static_cast<int>(window.x().start());
-    const auto window_end_x   = static_cast<int>(window.x().end());
+using L2NormalizeLayerKernelSelctorPtr = std::add_pointer<bool(const L2NormalizeLayerSelectorData &data)>::type;
 
-    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
-    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+using L2NormalizeLayerPtr = std::add_pointer<void(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)>::type;
 
-    Iterator input_it(in, win_collapsed);
-    Iterator sum_it(sum, win_collapsed);
-    Iterator output_it(out, win_collapsed);
+struct L2NormalizeLayerKernel
+{
+    const char                            *name;
+    const L2NormalizeLayerKernelSelctorPtr is_selected;
+    L2NormalizeLayerPtr                    ukernel;
+};
 
-    execute_window_loop(win_collapsed, [&](const Coordinates &)
+static const L2NormalizeLayerKernel available_kernels[] =
+{
     {
-        const auto in_ptr  = reinterpret_cast<const T *>(input_it.ptr());
-        const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());
-
-        const T    sum_value      = *reinterpret_cast<const T *>(sum_it.ptr());
-        const T    norm_value     = static_cast<T>(1.f) / std::sqrt(std::max(sum_value, static_cast<T>(epsilon)));
-        const auto vec_norm_value = wrapper::vdup_n(norm_value, ExactTagType{});
-
-        // Compute elements over vector steps
-        int x = window_start_x;
-        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-        {
-            wrapper::vstore(out_ptr + x, wrapper::vmul(wrapper::vloadq(in_ptr + x), vec_norm_value));
-        }
-
-        // Compute left-over elements
-        for(; x < window_end_x; ++x)
-        {
-            out_ptr[x] = in_ptr[x] * norm_value;
-        }
+        "fp32_neon_l2normalize_x",
+        [](const L2NormalizeLayerSelectorData & data) { return data.dt == DataType::F32 && data.actual_axis == Window::DimX; },
+        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_l2_normalize_x)
     },
-    input_it, sum_it, output_it);
-}
+    {
+        "fp32_neon_l2normalize_yz",
+        [](const L2NormalizeLayerSelectorData & data) { return data.dt == DataType::F32 && data.actual_axis != Window::DimX; },
+        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_l2_normalize_yz)
+    },
+    {
+        "fp16_neon_l2normalize_x",
+        [](const L2NormalizeLayerSelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16 && data.actual_axis == Window::DimX; },
+        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_l2_normalize_x),
+    },
+    {
+        "fp16_neon_l2normalize_yz",
+        [](const L2NormalizeLayerSelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16 && data.actual_axis != Window::DimX; },
+        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_l2_normalize_yz),
+    },
+};
 
-template <typename T, int S>
-void l2_normalize_YZ(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)
+/** Micro-kernel selector
+ *
+ * @param[in] data Selection data passed to help pick the appropriate micro-kernel
+ *
+ * @return A matching micro-kernel else nullptr
+ */
+const L2NormalizeLayerKernel *get_implementation(const L2NormalizeLayerSelectorData &data)
 {
-    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
-
-    const int  window_step_x  = 16 / data_size_from_type(in->info()->data_type());
-    const auto window_start_x = static_cast<int>(window.x().start());
-    const auto window_end_x   = static_cast<int>(window.x().end());
-
-    Window win = window;
-    win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
-    Window window_sum(win);
-    window_sum.set(axis, Window::Dimension(0, 0, 0));
-
-    Iterator input_it(in, win);
-    Iterator sum_it(sum, window_sum);
-    Iterator output_it(out, win);
-
-    const auto vec_eps = wrapper::vdup_n(static_cast<T>(epsilon), ExactTagType{});
-
-    execute_window_loop(win, [&](const Coordinates &)
+    for(const auto &uk : available_kernels)
     {
-        const auto in_ptr  = reinterpret_cast<const T *>(input_it.ptr());
-        const auto sum_ptr = reinterpret_cast<const T *>(sum_it.ptr());
-        const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());
-
-        // Compute elements over vector steps
-        int x = window_start_x;
-        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        if(uk.is_selected(data))
         {
-            const auto vec_norm_value = wrapper::vinvsqrt(wrapper::vmax(wrapper::vloadq(sum_ptr + x), vec_eps));
-            wrapper::vstore(out_ptr + x, wrapper::vmul(wrapper::vloadq(in_ptr + x), vec_norm_value));
+            return &uk;
         }
-
-        // Compute left-over elements
-        for(; x < window_end_x; ++x)
-        {
-            const T norm_value = static_cast<T>(1.f) / std::sqrt(std::max(sum_ptr[x], static_cast<T>(epsilon)));
-            out_ptr[x] = in_ptr[x] * norm_value;
-        }
-    },
-    input_it, sum_it, output_it);
+    }
+    return nullptr;
 }
 
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
@@ -212,18 +188,10 @@ void NEL2NormalizeLayerKernel::run(const Window &window, const ThreadInfo &info)
         ARM_COMPUTE_ERROR("Unsupported normalization axis");
     }
 
-    switch(_input->info()->data_type())
-    {
-        case DataType::F32:
-            (_actual_axis == Window::DimX) ? l2_normalize_X<float, 4>(_input, _sum, _output, _epsilon, window) : l2_normalize_YZ<float, 4>(_input, _sum, _output, _epsilon, window, _actual_axis);
-            break;
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        case DataType::F16:
-            (_actual_axis == Window::DimX) ? l2_normalize_X<float16_t, 8>(_input, _sum, _output, _epsilon, window) : l2_normalize_YZ<float16_t, 8>(_input, _sum, _output, _epsilon, window, _actual_axis);
-            break;
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        default:
-            ARM_COMPUTE_ERROR("Not implemented");
-    }
+    const auto *uk = get_implementation(L2NormalizeLayerSelectorData{ _output->info()->data_type(), _actual_axis, CPUInfo::get().get_isa() });
+    ARM_COMPUTE_ERROR_ON(uk == nullptr);
+    ARM_COMPUTE_ERROR_ON(uk->ukernel == nullptr);
+
+    uk->ukernel(_input, _sum, _output, _epsilon, window, _actual_axis);
 }
 } // namespace arm_compute
diff --git a/src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp b/src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp
new file mode 100644
index 0000000000..ed84c10d72
--- /dev/null
+++ b/src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+
+#include "src/cpu/kernels/l2normlayer/generic/neon/impl.h"
+
+#include "arm_compute/core/Helpers.h"
+namespace arm_compute
+{
+namespace cpu
+{
+void neon_fp16_l2_normalize_x(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t unused_axis)
+{
+    ARM_COMPUTE_UNUSED(unused_axis);
+    return l2_normalize_x<float16_t, 8>(in, sum, out, epsilon, window);
+}
+
+void neon_fp16_l2_normalize_yz(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)
+{
+    return l2_normalize_yz<float16_t, 8>(in, sum, out, epsilon, window, axis);
+}
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp b/src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp
new file mode 100644
index 0000000000..be32bdc4fa
--- /dev/null
+++ b/src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/l2normlayer/generic/neon/impl.h"
+
+#include "arm_compute/core/Helpers.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void neon_fp32_l2_normalize_x(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t unused_axis)
+{
+    ARM_COMPUTE_UNUSED(unused_axis);
+    return l2_normalize_x<float, 4>(in, sum, out, epsilon, window);
+}
+
+void neon_fp32_l2_normalize_yz(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)
+{
+    return l2_normalize_yz<float, 4>(in, sum, out, epsilon, window, axis);
+}
+
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/l2normlayer/generic/neon/impl.cpp b/src/cpu/kernels/l2normlayer/generic/neon/impl.cpp
new file mode 100644
index 0000000000..2886537702
--- /dev/null
+++ b/src/cpu/kernels/l2normlayer/generic/neon/impl.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/l2normlayer/generic/neon/impl.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/core/common/Registrars.h"
+
+#include <cmath>
+
+namespace arm_compute
+{
+namespace cpu
+{
+template <typename T, int S>
+void l2_normalize_x(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window)
+{
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    const int  window_step_x  = 16 / data_size_from_type(in->info()->data_type());
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+
+    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator input_it(in, win_collapsed);
+    Iterator sum_it(sum, win_collapsed);
+    Iterator output_it(out, win_collapsed);
+
+    execute_window_loop(win_collapsed, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const T *>(input_it.ptr());
+        const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());
+
+        const T    sum_value      = *reinterpret_cast<const T *>(sum_it.ptr());
+        const T    norm_value     = static_cast<T>(1.f) / std::sqrt(std::max(sum_value, static_cast<T>(epsilon)));
+        const auto vec_norm_value = wrapper::vdup_n(norm_value, ExactTagType{});
+
+        // Compute elements over vector steps
+        int x = window_start_x;
+        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        {
+            wrapper::vstore(out_ptr + x, wrapper::vmul(wrapper::vloadq(in_ptr + x), vec_norm_value));
+        }
+
+        // Compute left-over elements
+        for(; x < window_end_x; ++x)
+        {
+            out_ptr[x] = in_ptr[x] * norm_value;
+        }
+    },
+    input_it, sum_it, output_it);
+}
+
+template <typename T, int S>
+void l2_normalize_yz(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)
+{
+    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+    const int  window_step_x  = 16 / data_size_from_type(in->info()->data_type());
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+
+    Window win = window;
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Window window_sum(win);
+    window_sum.set(axis, Window::Dimension(0, 0, 0));
+
+    Iterator input_it(in, win);
+    Iterator sum_it(sum, window_sum);
+    Iterator output_it(out, win);
+
+    const auto vec_eps = wrapper::vdup_n(static_cast<T>(epsilon), ExactTagType{});
+
+    execute_window_loop(win, [&](const Coordinates &)
+    {
+        const auto in_ptr  = reinterpret_cast<const T *>(input_it.ptr());
+        const auto sum_ptr = reinterpret_cast<const T *>(sum_it.ptr());
+        const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());
+
+        // Compute elements over vector steps
+        int x = window_start_x;
+        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+        {
+            const auto vec_norm_value = wrapper::vinvsqrt(wrapper::vmax(wrapper::vloadq(sum_ptr + x), vec_eps));
+            wrapper::vstore(out_ptr + x, wrapper::vmul(wrapper::vloadq(in_ptr + x), vec_norm_value));
+        }
+
+        // Compute left-over elements
+        for(; x < window_end_x; ++x)
+        {
+            const T norm_value = static_cast<T>(1.f) / std::sqrt(std::max(sum_ptr[x], static_cast<T>(epsilon)));
+            out_ptr[x] = in_ptr[x] * norm_value;
+        }
+    },
+    input_it, sum_it, output_it);
+}
+
+template void l2_normalize_yz<float, 4>(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis);
+template void l2_normalize_x<float, 4>(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window);
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+template void l2_normalize_yz<float16_t, 8>(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis);
+template void l2_normalize_x<float16_t, 8>(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window);
+#endif //defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/l2normlayer/generic/neon/impl.h b/src/cpu/kernels/l2normlayer/generic/neon/impl.h
new file mode 100644
index 0000000000..98391fb3fd
--- /dev/null
+++ b/src/cpu/kernels/l2normlayer/generic/neon/impl.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_KERNELS_L2NORMLAYER_LIST_H
+#define SRC_CORE_NEON_KERNELS_L2NORMLAYER_LIST_H
+
+#include <cstddef>
+
+namespace arm_compute
+{
+class ITensor;
+class Window;
+
+namespace cpu
+{
+template <typename T, int S>
+void l2_normalize_x(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window);
+
+template <typename T, int S>
+void l2_normalize_yz(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis);
+
+} // namespace cpu
+} // namespace arm_compute
+#endif //SRC_CORE_NEON_KERNELS_L2NORMLAYER_LIST_H
diff --git a/src/cpu/kernels/l2normlayer/list.h b/src/cpu/kernels/l2normlayer/list.h
new file mode 100644
index 0000000000..2bad7f54f5
--- /dev/null
+++ b/src/cpu/kernels/l2normlayer/list.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_NEON_KERNELS_L2NORMLAYER_LIST_H
+#define SRC_CORE_NEON_KERNELS_L2NORMLAYER_LIST_H
+namespace arm_compute
+{
+namespace cpu
+{
+#define DECLARE_L2NORMLAYER_KERNEL(func_name) \
+    void func_name(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)
+
+DECLARE_L2NORMLAYER_KERNEL(neon_fp16_l2_normalize_x);
+DECLARE_L2NORMLAYER_KERNEL(neon_fp16_l2_normalize_yz);
+DECLARE_L2NORMLAYER_KERNEL(neon_fp32_l2_normalize_x);
+DECLARE_L2NORMLAYER_KERNEL(neon_fp32_l2_normalize_yz);
+
+#undef DECLARE_L2NORMLAYER_KERNEL
+} // namespace cpu
+} // namespace arm_compute
+#endif //SRC_CORE_NEON_KERNELS_L2NORMLAYER_LIST_H
-- 
cgit v1.2.1
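
For reference, the dispatch introduced above in NEL2NormalizeLayerKernel::run() is table-driven: each entry in available_kernels pairs a selection predicate with a micro-kernel function pointer, and get_implementation() returns the first entry whose predicate matches the data type, axis, and CPU ISA. The standalone sketch below illustrates that pattern outside the library; the simplified names (SelectorData, KernelEntry, select_kernel) and the stub kernels are illustrative stand-ins, not Compute Library API.

#include <cstddef>
#include <iostream>

// Stand-ins for the library's DataType / CpuIsaInfo selection inputs (illustrative only).
enum class DataType { F32, F16 };

struct SelectorData
{
    DataType     dt;
    unsigned int actual_axis; // 0 == X, otherwise Y/Z
    bool         fp16_isa;    // CPU reports FP16 vector support
};

// One table entry: a name, a selection predicate, and the micro-kernel to run.
struct KernelEntry
{
    const char *name;
    bool (*is_selected)(const SelectorData &);
    void (*ukernel)(size_t axis);
};

// Dummy micro-kernels standing in for the neon_fp32/fp16_l2_normalize_* functions.
void fp32_x(size_t)  { std::cout << "fp32 along X\n"; }
void fp32_yz(size_t) { std::cout << "fp32 along Y/Z\n"; }
void fp16_x(size_t)  { std::cout << "fp16 along X\n"; }

const KernelEntry available_kernels[] =
{
    { "fp32_x",  [](const SelectorData &d) { return d.dt == DataType::F32 && d.actual_axis == 0; }, fp32_x },
    { "fp32_yz", [](const SelectorData &d) { return d.dt == DataType::F32 && d.actual_axis != 0; }, fp32_yz },
    { "fp16_x",  [](const SelectorData &d) { return d.dt == DataType::F16 && d.fp16_isa && d.actual_axis == 0; }, fp16_x },
};

// First matching entry wins, mirroring get_implementation() in the patch.
const KernelEntry *select_kernel(const SelectorData &data)
{
    for(const auto &uk : available_kernels)
    {
        if(uk.is_selected(data))
        {
            return &uk;
        }
    }
    return nullptr;
}

int main()
{
    const auto *uk = select_kernel({ DataType::F32, 0u, true });
    if(uk != nullptr)
    {
        uk->ukernel(0); // prints "fp32 along X"
    }
    return 0;
}

Keeping the templated implementations in impl.cpp and registering only type-specific entry points this way lets fp16.cpp be compiled out entirely (via REGISTER_FP16_NEON and ENABLE_FP16_KERNELS) without touching the kernel's run() logic.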