From 36a75dafdbe6d6a3a6f50bd075fe01f5b7dace38 Mon Sep 17 00:00:00 2001
From: Renato Arantes
Date: Fri, 26 Jan 2024 17:31:18 +0000
Subject: [ONCPUML-1451] Add matmul kernel to enable bf16 to bf16 operations
 via PyTorch® autocast() function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The full range of tests will be added under the [MLINFSW-482] epic, due to
the lack of reordering kernels implemented in ACL.

Co-Authored-By: David Mansell
Change-Id: I820d316295a1ec94fdc89c37e4144a268f914c36
Signed-off-by: Renato Arantes
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11169
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
Comments-Addressed: Arm Jenkins
Benchmark: Arm Jenkins
---
 Android.bp                                         |    1 +
 SConscript                                         |    5 +-
 arm_compute/runtime/NEON/functions/NEMatMul.h      |   23 +-
 cmake/Options.cmake                                |    4 +-
 docs/user_guide/operator_list.dox                  |    3 +-
 docs/user_guide/release_version_and_change_log.dox |    3 +-
 filelist.json                                      |    1 +
 scripts/generate_build_files.py                    |    4 +-
 src/BUILD.bazel                                    |    3 +-
 src/CMakeLists.txt                                 |    3 +-
 src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp   |   79 +
 .../NEON/kernels/arm_gemm/mergeresults-sve.cpp     |    3 +-
 src/core/NEON/kernels/arm_gemm/mergeresults.cpp    |    7 +-
 .../arm_gemm/merges/a64_merge_fp32_bf16_8x12.hpp   | 2809 ++++++++++++++++++++
 src/core/NEON/kernels/arm_gemm/merges/list-sve.hpp |    3 +-
 src/core/NEON/kernels/arm_gemm/merges/list.hpp     |    3 +-
 .../arm_gemm/merges/sve_merge_fp32_bf16_8x3VL.hpp  | 2137 +++++++++++++++
 src/cpu/operators/CpuMatMul.cpp                    |   28 +-
 .../operators/internal/CpuGemmAssemblyDispatch.cpp |   31 +-
 support/Bfloat16.h                                 |   18 +-
 tests/SConscript                                   |    5 +-
 tests/validation/Helpers.h                         |   45 +-
 tests/validation/NEON/MatMul.cpp                   |  402 +--
 tests/validation/fixtures/MatMulFixture.h          |  383 ++-
 tests/validation/reference/ActivationLayer.cpp     |   27 +-
 tests/validation/reference/ActivationLayer.h       |   23 +-
 tests/validation/reference/DepthConvertLayer.cpp   |    4 +-
 tests/validation/reference/GEMM.cpp                |   79 +-
 tests/validation/reference/Permute.cpp             |   18 +-
 tests/validation/reference/ReshapeLayer.cpp        |   15 +-
 utils/TypePrinter.h                                |    2 +-
 31 files changed, 5839 insertions(+), 332 deletions(-)
 create mode 100644 src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
 create mode 100644 src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_bf16_8x12.hpp
 create mode 100644 src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_bf16_8x3VL.hpp

diff --git a/Android.bp b/Android.bp
index 0d087c943b..d216c6785d 100644
--- a/Android.bp
+++ b/Android.bp
@@ -324,6 +324,7 @@ cc_library_static {
         "src/core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp",
         "src/core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp",
         "src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp",
+        "src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp",
         "src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp",
         "src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp",
         "src/core/NEON/kernels/arm_gemm/gemm_int16.cpp",
diff --git a/SConscript b/SConscript
index 3a8f0e8cba..4c4bcc76f9 100644
--- a/SConscript
+++ b/SConscript
@@ -716,10 +716,7 @@ Export('bootcode_o')

 if (env['multi_isa']):
     lib_static_objs, lib_shared_objs = build_multiisa_lib_objects()
-
-
-# STATIC library build.
-if (env['multi_isa']):
+    # STATIC library build.
     arm_compute_a = build_library('arm_compute-static', arm_compute_env, lib_static_objs, static=True)
 else:
     if 'sve2' in env['arch']:
diff --git a/arm_compute/runtime/NEON/functions/NEMatMul.h b/arm_compute/runtime/NEON/functions/NEMatMul.h
index 414fc2f3fd..58dd7a6f6b 100644
--- a/arm_compute/runtime/NEON/functions/NEMatMul.h
+++ b/arm_compute/runtime/NEON/functions/NEMatMul.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ACL_ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL
-#define ACL_ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL
+#ifndef ACL_ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL_H
+#define ACL_ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL_H

 #include "arm_compute/core/Types.h"
 #include "arm_compute/function_info/ActivationLayerInfo.h"
@@ -41,15 +41,27 @@ public:
     {
         return _fast_math;
     }
+    // get fixed format flag
+    bool fixed_format() const
+    {
+        return _fixed_format;
+    }
     // Set fast math flag
     CpuMatMulSettings &fast_math(bool fmath)
     {
         _fast_math = fmath;
         return *this;
-    };
+    }
+    // Set fixed format flag
+    CpuMatMulSettings &fixed_format(bool fixed_format)
+    {
+        _fixed_format = fixed_format;
+        return *this;
+    }

 private:
     bool _fast_math{false};
+    bool _fixed_format{false};
 };

 // Forward declarations
@@ -87,6 +99,7 @@ public:
      * |:--------------|:------------------|:--------------|
      * |F32            |F32                |F32            |
      * |F16            |F16                |F16            |
+     * |BFLOAT16       |BFLOAT16           |BFLOAT16       |
      * |QASYMM8_SIGNED |QASYMM8_SIGNED     |QASYMM8_SIGNED |
      * |QASYMM8        |QASYMM8            |QASYMM8        |
      *
@@ -129,4 +142,4 @@ private:
     std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
-#endif /* ACL_ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL */
+#endif // ACL_ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEMATMUL_H
diff --git a/cmake/Options.cmake b/cmake/Options.cmake
index e5c8cb8efe..2e351fde7d 100644
--- a/cmake/Options.cmake
+++ b/cmake/Options.cmake
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Arm Limited.
+# Copyright (c) 2023-2024 Arm Limited.
 #
 # SPDX-License-Identifier: MIT
 #
@@ -48,7 +48,7 @@ set(ARM_COMPUTE_ARCH armv8-a CACHE STRING "Architecture to use")
 # ---------------------------------------------------------------------
 # Backends

-option(ARM_COMPUTE_ENABLE_BF16_VALIDATION "" OFF)
+option(ARM_COMPUTE_ENABLE_BF16_VALIDATION "" ON)
 option(ARM_COMPUTE_ENABLE_SVE_VALIDATION "" OFF)

 option(ENABLE_NEON "Enable Arm® Neon™ support" ON)
diff --git a/docs/user_guide/operator_list.dox b/docs/user_guide/operator_list.dox
index 25c856da10..36275e68bf 100644
--- a/docs/user_guide/operator_list.dox
+++ b/docs/user_guide/operator_list.dox
@@ -1,5 +1,5 @@
 ///
-/// Copyright (c) 2021-2023 Arm Limited.
+/// Copyright (c) 2021-2023,2024 Arm Limited.
 ///
 /// SPDX-License-Identifier: MIT
 ///
@@ -2091,6 +2091,7 @@ where N = batches, C = channels, H = height, W = width, D = depth
     <tr><th>lhs<th>rhs<th>dst
     <tr><td>F32<td>F32<td>F32
     <tr><td>F16<td>F16<td>F16
+    <tr><td>BFLOAT16<td>BFLOAT16<td>BFLOAT16
     <tr><td>QASYMM8_SIGNED<td>QASYMM8_SIGNED<td>QASYMM8_SIGNED
     <tr><td>QASYMM8<td>QASYMM8<td>QASYMM8
   </table>
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 2d46737e96..31b756070d 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -42,10 +42,11 @@ If there is more than one release in a month then an extra sequential number is
 @section S2_2_changelog Changelog

 v24.04 Public major release
+ - Add Bfloat16 data type support for @ref NEMatMul.
 - Optimize start-up time of @ref NEConvolutionLayer for some input configurations where GeMM is selected as the convolution algorithm
 - Optimize @ref NEConvolutionLayer for input tensor size > 1e7 bytes and weight tensor height > 7
 - Performance optimizations:
-   - Optimize @ref NESoftmaxLayer for axis != 0 by natively supporting higher axes up to axis 3. 
+   - Optimize @ref NESoftmaxLayer for axis != 0 by natively supporting higher axes up to axis 3.

 v24.02.1 Public patch release
 - Fix performance regression in fixed-format kernels
diff --git a/filelist.json b/filelist.json
index d44a7216ac..9f0f302033 100644
--- a/filelist.json
+++ b/filelist.json
@@ -1586,6 +1586,7 @@
       "src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp",
       "src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp",
       "src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp",
+      "src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp",
       "src/core/NEON/kernels/arm_gemm/gemm_int16.cpp",
       "src/core/NEON/kernels/arm_gemm/gemm_int8.cpp",
       "src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp",
diff --git a/scripts/generate_build_files.py b/scripts/generate_build_files.py
index 17cf49c0a9..f88cf1af44 100644
--- a/scripts/generate_build_files.py
+++ b/scripts/generate_build_files.py
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-

-# Copyright (c) 2023 Arm Limited.
+# Copyright (c) 2023-2024 Arm Limited.
 #
 # SPDX-License-Identifier: MIT
 #
@@ -93,7 +93,7 @@ def resolve_operator_dependencies(filelist, operators, backend=''):
     return resolved_operators

 def get_template_header():
-    return """# Copyright (c) 2023 Arm Limited.
+    return """# Copyright (c) 2023-2024 Arm Limited.
 #
 # SPDX-License-Identifier: MIT
 #
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index f9d166c525..d4a3b61836 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Arm Limited.
+# Copyright (c) 2023-2024 Arm Limited.
 #
 # SPDX-License-Identifier: MIT
 #
@@ -509,6 +509,7 @@ filegroup(
         "core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp",
         "core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp",
         "core/NEON/kernels/arm_gemm/gemm_bf16.cpp",
+        "core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp",
         "core/NEON/kernels/arm_gemm/gemm_fp16.cpp",
         "core/NEON/kernels/arm_gemm/gemm_fp32.cpp",
         "core/NEON/kernels/arm_gemm/gemm_int16.cpp",
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c5a172172b..c6410714d2 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Arm Limited.
+# Copyright (c) 2023-2024 Arm Limited.
 #
 # SPDX-License-Identifier: MIT
 #
@@ -500,6 +500,7 @@ target_sources(
     core/NEON/kernels/arm_conv/pooling/pooling_u8.cpp
     core/NEON/kernels/arm_conv/pooling/pooling_u8q.cpp
     core/NEON/kernels/arm_gemm/gemm_bf16.cpp
+    core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
     core/NEON/kernels/arm_gemm/gemm_fp16.cpp
     core/NEON/kernels/arm_gemm/gemm_fp32.cpp
     core/NEON/kernels/arm_gemm/gemm_int16.cpp
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
new file mode 100644
index 0000000000..aa761b46e4
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2017-2020, 2022-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "bfloat.hpp"
+#include "gemm_implementation.hpp"
+#include "gemm_interleaved.hpp"
+
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp"
+#include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+
+namespace arm_gemm {
+
+static const GemmImplementation<bfloat16, bfloat16> gemm_bf16bf16_methods[] =
+{
+#ifdef __aarch64__
+#ifdef ARM_COMPUTE_ENABLE_BF16
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+GemmImplementation<bfloat16, bfloat16>::with_estimate(
+    GemmMethod::GEMM_INTERLEAVED,
+    "a64_ffinterleaved_bf16fp32_mmla_8x12",
+    KernelWeightFormat::VL256_BL64,
+    [](const GemmArgs &args) { return args._ci->has_bf16(); },
+    [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, bfloat16>::estimate_cycles<bfloat16>(args); },
+    [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, bfloat16>(args); }
+),
+GemmImplementation<bfloat16, bfloat16>::with_estimate(
+    GemmMethod::GEMM_INTERLEAVED,
+    "sve_ffinterleaved_bf16fp32_mmla_8x3VL",
+    KernelWeightFormat::VL2VL_BL64,
+    [](const GemmArgs &args) { return args._ci->has_svebf16(); },
+    [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, bfloat16, bfloat16>::estimate_cycles<bfloat16>(args); },
+    [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, bfloat16, bfloat16>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#endif // ARM_COMPUTE_ENABLE_BF16
+#endif // __aarch64__
+{
+    GemmMethod::DEFAULT,
+    "",
+    nullptr,
+    nullptr,
+    nullptr
+}
+};
+
+template<>
+const GemmImplementation<bfloat16, bfloat16> *gemm_implementation_list<bfloat16, bfloat16>() {
+    return gemm_bf16bf16_methods;
+}
+
+/* Explicitly instantiate the external functions for these types. */
+template UniqueGemmCommon<bfloat16, bfloat16> gemm<bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<bfloat16, bfloat16, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
+
+} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/mergeresults-sve.cpp b/src/core/NEON/kernels/arm_gemm/mergeresults-sve.cpp
index a4124c4a54..d3665534a5 100644
--- a/src/core/NEON/kernels/arm_gemm/mergeresults-sve.cpp
+++ b/src/core/NEON/kernels/arm_gemm/mergeresults-sve.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,6 +30,7 @@

 #include "arm_gemm.hpp"
 #include "asmlib.hpp"
+#include "bfloat.hpp"
 #include "utils.hpp"

 #include "mergeresults.hpp"
diff --git a/src/core/NEON/kernels/arm_gemm/mergeresults.cpp b/src/core/NEON/kernels/arm_gemm/mergeresults.cpp
index 2b712cee61..e100d9fe46 100644
--- a/src/core/NEON/kernels/arm_gemm/mergeresults.cpp
+++ b/src/core/NEON/kernels/arm_gemm/mergeresults.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -30,6 +30,7 @@

 #include "arm_gemm.hpp"
 #include "asmlib.hpp"
+#include "bfloat.hpp"
 #include "utils.hpp"

 namespace arm_gemm {
@@ -114,4 +115,8 @@ template void MergeResults<12u, 8u, false, float, __fp16>(__fp16*, float const*,
 template void MergeResults<8u, 6u, false, float, __fp16>(__fp16*, float const*, int, int, int, int, int, __fp16 const*, Activation, bool);
 #endif

+#if defined(__arm__) && defined(ARM_COMPUTE_ENABLE_BF16)
+template void MergeResults<8u, 6u, false, float, bfloat16>(bfloat16*, float const*, int, int, int, int, int, bfloat16 const*, Activation, bool);
+#endif
+
 } // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_bf16_8x12.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_bf16_8x12.hpp
new file mode 100644
index 0000000000..a57a855e31
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp32_bf16_8x12.hpp
@@ -0,0 +1,2809 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+template<>
+void MergeResults<12, 8, false>(
+    bfloat16 *out_ptr,
+    const float * in_ptr,
+    const int ldout,
+    const int y0, const int ymax,
+    const int x0, const int xmax,
+    const bfloat16 *bias,
+    Activation act,
+    bool accumulate)
+{
+    float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
+    float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
+
+    switch(act.type) {
+        default:
+        case Activation::Type::None:
+            break;
+        case Activation::Type::BoundedReLU:
+            maxval = static_cast<float>(act.param1);
+            /* fall through */
+        case Activation::Type::ReLU:
+            minval = 0;
+            break;
+    }
+
+    size_t rows = ymax-y0;
+    size_t cols = xmax-x0;
+
+    out_ptr += (y0 * ldout) + x0;
+    bias = (bias == nullptr) ?
nullptr : bias + x0; + + __asm__ __volatile__( + "cbz %x[cols], 108f\n" + "cbz %x[rows], 108f\n" + "mov x11, #0x20\n" + "dup v13.4s, %w[maxval]\n" + "dup v12.4s, %w[minval]\n" + "mul x11, %x[ldout], x11\n" + "cbnz %x[accumulate], 66f\n" + "1:" // Initial: Row loop + "cmp %x[rows], #0x7\n" + "bgt 58f\n" + "beq 50f\n" + "cmp %x[rows], #0x5\n" + "bgt 42f\n" + "beq 34f\n" + "cmp %x[rows], #0x3\n" + "bgt 26f\n" + "beq 18f\n" + "cmp %x[rows], #0x1\n" + "bgt 10f\n" + "2:" // Initial: Height 1 + "mov x10, %x[cols]\n" + "mov x9, %x[out_ptr]\n" + "mov x28, %x[bias]\n" + "cmp x10, #0xc\n" + "blt 6f\n" + "3:" // Initial: Height 1: Block loop + "cbnz %x[bias], 4f\n" + "movi v21.16b, #0x0\n" + "movi v20.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "b 5f\n" + "4:" // Initial: Height 1: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v21.4s, v18.4h, #0x10\n" + "shll v20.4s, v17.4h, #0x10\n" + "shll v19.4s, v16.4h, #0x10\n" + "5:" // Initial: Height 1: Width 3: init done + "ldr q18, [%x[in_ptr], #0x0]\n" + "ldr q17, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q16, [%x[in_ptr], #0x20]\n" + "cmp x10, #0xc\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v18.4s, v18.4s, v21.4s\n" + "fadd v17.4s, v17.4s, v20.4s\n" + "fadd v16.4s, v16.4s, v19.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + "str d18, [x9, #0x0]\n" + "str d17, [x9, #0x8]\n" + "str d16, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + "bge 3b\n" + "6:" // Initial: Height 1: no full blocks + "cbz x10, 9f\n" + "mov x20, %x[in_ptr]\n" + "7:" // Initial: Height 1: Single loop + "movi v17.16b, #0x0\n" + "cbz %x[bias], 8f\n" + "ldr h16, [x28, #0x0]\n" + "shll v17.4s, v16.4h, #0x10\n" + "8:" // Initial: Height 1: Scalar: no bias + "ldr s16, [%x[in_ptr], #0x0]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v16.4s, v16.4s, v17.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "bne 7b\n" + "add %x[in_ptr], x20, #0x180\n" + "9:" // Initial: Height 1: no oddments + "b 108f\n" + "10:" // Initial: Height 2 + "mov x10, %x[cols]\n" + "mov x9, %x[out_ptr]\n" + "mov x28, %x[bias]\n" + "cmp x10, #0xc\n" + "add x27, x9, %x[ldout], LSL #1\n" + "blt 14f\n" + "11:" // Initial: Height 2: Block loop + "cbnz %x[bias], 12f\n" + "movi v24.16b, #0x0\n" + "movi v23.16b, #0x0\n" + "movi v22.16b, #0x0\n" + "b 13f\n" + "12:" // Initial: Height 2: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v24.4s, v18.4h, #0x10\n" + "shll v23.4s, v17.4h, #0x10\n" + "shll v22.4s, v16.4h, #0x10\n" + "13:" // Initial: Height 2: Width 3: init done + "ldr q16, [%x[in_ptr], #0x0]\n" + "ldr q20, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q19, [%x[in_ptr], #0x20]\n" + "ldr q18, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q17, [%x[in_ptr], #0x40]\n" + "ldr q21, [%x[in_ptr], #0x50]\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v16.4s, v16.4s, v24.4s\n" + "fadd v20.4s, v20.4s, v23.4s\n" + "fadd v19.4s, v19.4s, v22.4s\n" + "fadd v18.4s, 
v18.4s, v24.4s\n" + "fadd v17.4s, v17.4s, v23.4s\n" + "fadd v21.4s, v21.4s, v22.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str d16, [x9, #0x0]\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + "str d20, [x9, #0x8]\n" + "str d19, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + "str d18, [x27, #0x0]\n" + "str d17, [x27, #0x8]\n" + "str d16, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "bge 11b\n" + "14:" // Initial: Height 2: no full blocks + "cbz x10, 17f\n" + "mov x20, %x[in_ptr]\n" + "15:" // Initial: Height 2: Single loop + "movi v18.16b, #0x0\n" + "cbz %x[bias], 16f\n" + "ldr h16, [x28, #0x0]\n" + "shll v18.4s, v16.4h, #0x10\n" + "16:" // Initial: Height 2: Scalar: no bias + "ldr s17, [%x[in_ptr], #0x0]\n" + "ldr s16, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v17.4s, v17.4s, v18.4s\n" + "fadd v16.4s, v16.4s, v18.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + "str h17, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h16, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "bne 15b\n" + "add %x[in_ptr], x20, #0x180\n" + "17:" // Initial: Height 2: no oddments + "b 108f\n" + "18:" // Initial: Height 3 + "mov x10, %x[cols]\n" + "mov x9, %x[out_ptr]\n" + "mov x28, %x[bias]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "blt 22f\n" + "19:" // Initial: Height 3: Block loop + "cbnz %x[bias], 20f\n" + "movi v27.16b, #0x0\n" + "movi v26.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "b 21f\n" + "20:" // Initial: Height 3: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v27.4s, v18.4h, #0x10\n" + "shll v26.4s, v17.4h, #0x10\n" + "shll v25.4s, v16.4h, #0x10\n" + "21:" // Initial: Height 3: Width 3: init done + "ldr q18, [%x[in_ptr], #0x0]\n" + "ldr q17, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q16, [%x[in_ptr], #0x20]\n" + "ldr q21, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q20, [%x[in_ptr], #0x40]\n" + "ldr q19, [%x[in_ptr], #0x50]\n" + "ldr q24, [%x[in_ptr], #0x60]\n" + "ldr q23, [%x[in_ptr], #0x70]\n" + "fadd v18.4s, v18.4s, v27.4s\n" + "fadd v17.4s, v17.4s, v26.4s\n" + "ldr q22, [%x[in_ptr], #0x80]\n" + "fadd v16.4s, v16.4s, v25.4s\n" + "fadd v21.4s, v21.4s, v27.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v20.4s, v20.4s, v26.4s\n" + "fadd v19.4s, v19.4s, v25.4s\n" + "fadd v24.4s, v24.4s, v27.4s\n" + "fadd v23.4s, v23.4s, v26.4s\n" + "fadd v22.4s, v22.4s, v25.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v24.4s, 
v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + "str d18, [x9, #0x0]\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16b12 // bfcvtn v18.4h, v24.4s\n" + "str d17, [x9, #0x8]\n" + "str d16, [x9, #0x10]\n" + ".inst 0x0ea16af1 // bfcvtn v17.4h, v23.4s\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "add x9, x9, #0x18\n" + "str d21, [x27, #0x0]\n" + "str d20, [x27, #0x8]\n" + "str d19, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "str d18, [x26, #0x0]\n" + "str d17, [x26, #0x8]\n" + "str d16, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "bge 19b\n" + "22:" // Initial: Height 3: no full blocks + "cbz x10, 25f\n" + "mov x20, %x[in_ptr]\n" + "23:" // Initial: Height 3: Single loop + "movi v19.16b, #0x0\n" + "cbz %x[bias], 24f\n" + "ldr h16, [x28, #0x0]\n" + "shll v19.4s, v16.4h, #0x10\n" + "24:" // Initial: Height 3: Scalar: no bias + "ldr s16, [%x[in_ptr], #0x0]\n" + "ldr s17, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "ldr s18, [%x[in_ptr], #0x60]\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v16.4s, v16.4s, v19.4s\n" + "fadd v17.4s, v17.4s, v19.4s\n" + "fadd v18.4s, v18.4s, v19.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16a50 // bfcvtn v16.4h, v18.4s\n" + "str h17, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h16, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "bne 23b\n" + "add %x[in_ptr], x20, #0x180\n" + "25:" // Initial: Height 3: no oddments + "b 108f\n" + "26:" // Initial: Height 4 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "mov x28, %x[bias]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x25, x26, %x[ldout], LSL #1\n" + "blt 30f\n" + "27:" // Initial: Height 4: Block loop + "cbnz %x[bias], 28f\n" + "movi v30.16b, #0x0\n" + "movi v29.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "b 29f\n" + "28:" // Initial: Height 4: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v30.4s, v18.4h, #0x10\n" + "shll v29.4s, v17.4h, #0x10\n" + "shll v28.4s, v16.4h, #0x10\n" + "29:" // Initial: Height 4: Width 3: init done + "ldr q19, [%x[in_ptr], #0x0]\n" + "ldr q18, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q17, [%x[in_ptr], #0x20]\n" + "ldr q16, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q23, [%x[in_ptr], #0x40]\n" + "ldr q22, [%x[in_ptr], #0x50]\n" + "ldr q21, [%x[in_ptr], #0x60]\n" + "ldr q20, [%x[in_ptr], #0x70]\n" + "fadd v19.4s, v19.4s, v30.4s\n" + "fadd v18.4s, v18.4s, v29.4s\n" + "ldr q27, [%x[in_ptr], #0x80]\n" + "ldr q26, [%x[in_ptr], #0x90]\n" + "fadd v17.4s, v17.4s, v28.4s\n" + "fadd 
v16.4s, v16.4s, v30.4s\n" + "ldr q25, [%x[in_ptr], #0xa0]\n" + "ldr q24, [%x[in_ptr], #0xb0]\n" + "fadd v23.4s, v23.4s, v29.4s\n" + "fadd v22.4s, v22.4s, v28.4s\n" + "fadd v21.4s, v21.4s, v30.4s\n" + "fadd v20.4s, v20.4s, v29.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v27.4s, v27.4s, v28.4s\n" + "fadd v26.4s, v26.4s, v30.4s\n" + "fadd v25.4s, v25.4s, v29.4s\n" + "fadd v24.4s, v24.4s, v28.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n" + ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n" + "str d19, [x9, #0x0]\n" + "str d18, [x9, #0x8]\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + "str d17, [x9, #0x10]\n" + ".inst 0x0ea16b73 // bfcvtn v19.4h, v27.4s\n" + ".inst 0x0ea16b52 // bfcvtn v18.4h, v26.4s\n" + "add x9, x9, #0x18\n" + "str d16, [x27, #0x0]\n" + ".inst 0x0ea16b31 // bfcvtn v17.4h, v25.4s\n" + ".inst 0x0ea16b10 // bfcvtn v16.4h, v24.4s\n" + "str d23, [x27, #0x8]\n" + "str d22, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "str d21, [x26, #0x0]\n" + "str d20, [x26, #0x8]\n" + "str d19, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d18, [x25, #0x0]\n" + "str d17, [x25, #0x8]\n" + "str d16, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "bge 27b\n" + "30:" // Initial: Height 4: no full blocks + "cbz x10, 33f\n" + "mov x20, %x[in_ptr]\n" + "31:" // Initial: Height 4: Single loop + "movi v20.16b, #0x0\n" + "cbz %x[bias], 32f\n" + "ldr h16, [x28, #0x0]\n" + "shll v20.4s, v16.4h, #0x10\n" + "32:" // Initial: Height 4: Scalar: no bias + "ldr s16, [%x[in_ptr], #0x0]\n" + "ldr s18, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "ldr s17, [%x[in_ptr], #0x60]\n" + "ldr s19, [%x[in_ptr], #0x90]\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v16.4s, v16.4s, v20.4s\n" + "fadd v18.4s, v18.4s, v20.4s\n" + "fadd v17.4s, v17.4s, v20.4s\n" + "fadd v19.4s, v19.4s, v20.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16a70 // bfcvtn v16.4h, v19.4s\n" + "str h18, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h17, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h16, [x25, #0x0]\n" + "add x25, x25, 
#0x2\n" + "bne 31b\n" + "add %x[in_ptr], x20, #0x180\n" + "33:" // Initial: Height 4: no oddments + "b 108f\n" + "34:" // Initial: Height 5 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "mov x28, %x[bias]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x24, x25, %x[ldout], LSL #1\n" + "blt 38f\n" + "35:" // Initial: Height 5: Block loop + "cbnz %x[bias], 36f\n" + "movi v1.16b, #0x0\n" + "movi v0.16b, #0x0\n" + "movi v31.16b, #0x0\n" + "b 37f\n" + "36:" // Initial: Height 5: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v1.4s, v18.4h, #0x10\n" + "shll v0.4s, v17.4h, #0x10\n" + "shll v31.4s, v16.4h, #0x10\n" + "37:" // Initial: Height 5: Width 3: init done + "ldr q16, [%x[in_ptr], #0x0]\n" + "ldr q20, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q19, [%x[in_ptr], #0x20]\n" + "ldr q18, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q17, [%x[in_ptr], #0x40]\n" + "ldr q30, [%x[in_ptr], #0x50]\n" + "ldr q24, [%x[in_ptr], #0x60]\n" + "ldr q23, [%x[in_ptr], #0x70]\n" + "fadd v16.4s, v16.4s, v1.4s\n" + "fadd v20.4s, v20.4s, v0.4s\n" + "ldr q22, [%x[in_ptr], #0x80]\n" + "ldr q21, [%x[in_ptr], #0x90]\n" + "fadd v19.4s, v19.4s, v31.4s\n" + "fadd v18.4s, v18.4s, v1.4s\n" + "ldr q29, [%x[in_ptr], #0xa0]\n" + "ldr q28, [%x[in_ptr], #0xb0]\n" + "fadd v17.4s, v17.4s, v0.4s\n" + "fadd v30.4s, v30.4s, v31.4s\n" + "ldr q27, [%x[in_ptr], #0xc0]\n" + "ldr q26, [%x[in_ptr], #0xd0]\n" + "fadd v24.4s, v24.4s, v1.4s\n" + "fadd v23.4s, v23.4s, v0.4s\n" + "ldr q25, [%x[in_ptr], #0xe0]\n" + "fadd v22.4s, v22.4s, v31.4s\n" + "fadd v21.4s, v21.4s, v1.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v29.4s, v29.4s, v0.4s\n" + "fadd v28.4s, v28.4s, v31.4s\n" + "fadd v27.4s, v27.4s, v1.4s\n" + "fadd v26.4s, v26.4s, v0.4s\n" + "fadd v25.4s, v25.4s, v31.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str d16, [x9, #0x0]\n" + ".inst 0x0ea16bd0 // bfcvtn v16.4h, v30.4s\n" + ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n" + "str d20, [x9, #0x8]\n" + "str d19, [x9, #0x10]\n" + ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n" + ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n" + "add x9, x9, #0x18\n" + "str d18, [x27, #0x0]\n" + ".inst 
0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16bb4 // bfcvtn v20.4h, v29.4s\n" + "str d17, [x27, #0x8]\n" + ".inst 0x0ea16b93 // bfcvtn v19.4h, v28.4s\n" + ".inst 0x0ea16b72 // bfcvtn v18.4h, v27.4s\n" + "str d16, [x27, #0x10]\n" + ".inst 0x0ea16b51 // bfcvtn v17.4h, v26.4s\n" + ".inst 0x0ea16b30 // bfcvtn v16.4h, v25.4s\n" + "add x27, x27, #0x18\n" + "str d24, [x26, #0x0]\n" + "str d23, [x26, #0x8]\n" + "str d22, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d21, [x25, #0x0]\n" + "str d20, [x25, #0x8]\n" + "str d19, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d18, [x24, #0x0]\n" + "str d17, [x24, #0x8]\n" + "str d16, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "bge 35b\n" + "38:" // Initial: Height 5: no full blocks + "cbz x10, 41f\n" + "mov x20, %x[in_ptr]\n" + "39:" // Initial: Height 5: Single loop + "movi v21.16b, #0x0\n" + "cbz %x[bias], 40f\n" + "ldr h16, [x28, #0x0]\n" + "shll v21.4s, v16.4h, #0x10\n" + "40:" // Initial: Height 5: Scalar: no bias + "ldr s16, [%x[in_ptr], #0x0]\n" + "ldr s19, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "ldr s18, [%x[in_ptr], #0x60]\n" + "ldr s17, [%x[in_ptr], #0x90]\n" + "ldr s20, [%x[in_ptr], #0xc0]\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v16.4s, v16.4s, v21.4s\n" + "fadd v19.4s, v19.4s, v21.4s\n" + "fadd v18.4s, v18.4s, v21.4s\n" + "fadd v17.4s, v17.4s, v21.4s\n" + "fadd v20.4s, v20.4s, v21.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16a90 // bfcvtn v16.4h, v20.4s\n" + "str h19, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h18, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h17, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h16, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "bne 39b\n" + "add %x[in_ptr], x20, #0x180\n" + "41:" // Initial: Height 5: no oddments + "b 108f\n" + "42:" // Initial: Height 6 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "mov x28, %x[bias]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x23, x24, %x[ldout], LSL #1\n" + "blt 46f\n" + "43:" // Initial: Height 6: Block loop + "cbnz %x[bias], 44f\n" + "movi v4.16b, #0x0\n" + "movi v3.16b, #0x0\n" + "movi v2.16b, #0x0\n" + "b 45f\n" + "44:" // Initial: Height 6: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v4.4s, v18.4h, #0x10\n" + "shll v3.4s, v17.4h, #0x10\n" + "shll v2.4s, v16.4h, #0x10\n" + "45:" // Initial: Height 6: Width 3: init done + "ldr q21, [%x[in_ptr], #0x0]\n" + "ldr q16, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q20, [%x[in_ptr], #0x20]\n" + "ldr q19, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q18, [%x[in_ptr], #0x40]\n" + "ldr q17, [%x[in_ptr], #0x50]\n" + "ldr q1, [%x[in_ptr], #0x60]\n" + "ldr q26, [%x[in_ptr], #0x70]\n" + "fadd v21.4s, v21.4s, v4.4s\n" + "fadd v16.4s, v16.4s, v3.4s\n" + "ldr q25, [%x[in_ptr], 
#0x80]\n" + "ldr q24, [%x[in_ptr], #0x90]\n" + "fadd v20.4s, v20.4s, v2.4s\n" + "fadd v19.4s, v19.4s, v4.4s\n" + "ldr q23, [%x[in_ptr], #0xa0]\n" + "ldr q22, [%x[in_ptr], #0xb0]\n" + "fadd v18.4s, v18.4s, v3.4s\n" + "fadd v17.4s, v17.4s, v2.4s\n" + "ldr q0, [%x[in_ptr], #0xc0]\n" + "ldr q31, [%x[in_ptr], #0xd0]\n" + "fadd v1.4s, v1.4s, v4.4s\n" + "fadd v26.4s, v26.4s, v3.4s\n" + "ldr q30, [%x[in_ptr], #0xe0]\n" + "ldr q29, [%x[in_ptr], #0xf0]\n" + "fadd v25.4s, v25.4s, v2.4s\n" + "fadd v24.4s, v24.4s, v4.4s\n" + "ldr q28, [%x[in_ptr], #0x100]\n" + "ldr q27, [%x[in_ptr], #0x110]\n" + "fadd v23.4s, v23.4s, v3.4s\n" + "fadd v22.4s, v22.4s, v2.4s\n" + "fadd v0.4s, v0.4s, v4.4s\n" + "fadd v31.4s, v31.4s, v3.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v30.4s, v30.4s, v2.4s\n" + "fadd v29.4s, v29.4s, v4.4s\n" + "fadd v28.4s, v28.4s, v3.4s\n" + "fadd v27.4s, v27.4s, v2.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str d21, [x9, #0x0]\n" + "str d16, [x9, #0x8]\n" + ".inst 0x0ea16830 // bfcvtn v16.4h, v1.4s\n" + ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n" + "str d20, [x9, #0x10]\n" + ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n" + ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n" + "add x9, x9, #0x18\n" + "str d19, [x27, #0x0]\n" + ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n" + ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n" + "str d18, [x27, #0x8]\n" + ".inst 0x0ea16815 // bfcvtn v21.4h, v0.4s\n" + ".inst 0x0ea16bf4 // bfcvtn v20.4h, v31.4s\n" + "str d17, [x27, #0x10]\n" + ".inst 0x0ea16bd3 // bfcvtn v19.4h, v30.4s\n" + ".inst 0x0ea16bb2 // bfcvtn v18.4h, v29.4s\n" + "add x27, x27, #0x18\n" + "str d16, [x26, #0x0]\n" + ".inst 0x0ea16b91 // bfcvtn v17.4h, v28.4s\n" + ".inst 0x0ea16b70 // bfcvtn v16.4h, v27.4s\n" + "str d26, [x26, #0x8]\n" + "str d25, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d24, [x25, #0x0]\n" + "str d23, [x25, #0x8]\n" + "str d22, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d21, [x24, #0x0]\n" + "str d20, [x24, #0x8]\n" + "str d19, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "str d18, [x23, #0x0]\n" + "str d17, [x23, 
#0x8]\n" + "str d16, [x23, #0x10]\n" + "add x23, x23, #0x18\n" + "bge 43b\n" + "46:" // Initial: Height 6: no full blocks + "cbz x10, 49f\n" + "mov x20, %x[in_ptr]\n" + "47:" // Initial: Height 6: Single loop + "movi v22.16b, #0x0\n" + "cbz %x[bias], 48f\n" + "ldr h16, [x28, #0x0]\n" + "shll v22.4s, v16.4h, #0x10\n" + "48:" // Initial: Height 6: Scalar: no bias + "ldr s16, [%x[in_ptr], #0x0]\n" + "ldr s20, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "ldr s19, [%x[in_ptr], #0x60]\n" + "ldr s18, [%x[in_ptr], #0x90]\n" + "ldr s17, [%x[in_ptr], #0xc0]\n" + "ldr s21, [%x[in_ptr], #0xf0]\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v16.4s, v16.4s, v22.4s\n" + "fadd v20.4s, v20.4s, v22.4s\n" + "fadd v19.4s, v19.4s, v22.4s\n" + "fadd v18.4s, v18.4s, v22.4s\n" + "fadd v17.4s, v17.4s, v22.4s\n" + "fadd v21.4s, v21.4s, v22.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + "str h20, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h19, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h18, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h17, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "str h16, [x23, #0x0]\n" + "add x23, x23, #0x2\n" + "bne 47b\n" + "add %x[in_ptr], x20, #0x180\n" + "49:" // Initial: Height 6: no oddments + "b 108f\n" + "50:" // Initial: Height 7 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "mov x28, %x[bias]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x23, x24, %x[ldout], LSL #1\n" + "add x22, x23, %x[ldout], LSL #1\n" + "blt 54f\n" + "51:" // Initial: Height 7: Block loop + "cbnz %x[bias], 52f\n" + "movi v7.16b, #0x0\n" + "movi v6.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "b 53f\n" + "52:" // Initial: Height 7: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v7.4s, v18.4h, #0x10\n" + "shll v6.4s, v17.4h, #0x10\n" + "shll v5.4s, v16.4h, #0x10\n" + "53:" // Initial: Height 7: Width 3: init done + "ldr q18, [%x[in_ptr], #0x0]\n" + "ldr q17, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q16, [%x[in_ptr], #0x20]\n" + "ldr q21, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q20, [%x[in_ptr], #0x40]\n" + "ldr q19, [%x[in_ptr], #0x50]\n" + "ldr q4, [%x[in_ptr], #0x60]\n" + "ldr q3, [%x[in_ptr], #0x70]\n" + "fadd v18.4s, v18.4s, v7.4s\n" + "fadd v17.4s, v17.4s, v6.4s\n" + "ldr q2, [%x[in_ptr], #0x80]\n" + "ldr q27, [%x[in_ptr], #0x90]\n" + "fadd v16.4s, v16.4s, v5.4s\n" + "fadd v21.4s, v21.4s, v7.4s\n" + "ldr q26, [%x[in_ptr], #0xa0]\n" + "ldr q25, [%x[in_ptr], #0xb0]\n" + "fadd v20.4s, v20.4s, v6.4s\n" + "fadd v19.4s, v19.4s, v5.4s\n" + "ldr q24, [%x[in_ptr], #0xc0]\n" + "ldr q23, [%x[in_ptr], #0xd0]\n" + "fadd v4.4s, v4.4s, v7.4s\n" + "fadd 
v3.4s, v3.4s, v6.4s\n" + "ldr q22, [%x[in_ptr], #0xe0]\n" + "ldr q1, [%x[in_ptr], #0xf0]\n" + "fadd v2.4s, v2.4s, v5.4s\n" + "fadd v27.4s, v27.4s, v7.4s\n" + "ldr q0, [%x[in_ptr], #0x100]\n" + "ldr q31, [%x[in_ptr], #0x110]\n" + "fadd v26.4s, v26.4s, v6.4s\n" + "fadd v25.4s, v25.4s, v5.4s\n" + "ldr q30, [%x[in_ptr], #0x120]\n" + "ldr q29, [%x[in_ptr], #0x130]\n" + "fadd v24.4s, v24.4s, v7.4s\n" + "fadd v23.4s, v23.4s, v6.4s\n" + "ldr q28, [%x[in_ptr], #0x140]\n" + "fadd v22.4s, v22.4s, v5.4s\n" + "fadd v1.4s, v1.4s, v7.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v0.4s, v0.4s, v6.4s\n" + "fadd v31.4s, v31.4s, v5.4s\n" + "fadd v30.4s, v30.4s, v7.4s\n" + "fadd v29.4s, v29.4s, v6.4s\n" + "fadd v28.4s, v28.4s, v5.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v4.4s, v4.4s, v13.4s\n" + "fmin v3.4s, v3.4s, v13.4s\n" + "fmin v2.4s, v2.4s, v13.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v4.4s, v4.4s, v12.4s\n" + "fmax v3.4s, v3.4s, v12.4s\n" + "fmax v2.4s, v2.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + "str d18, [x9, #0x0]\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16892 // bfcvtn v18.4h, v4.4s\n" + "str d17, [x9, #0x8]\n" + "str d16, [x9, #0x10]\n" + ".inst 0x0ea16871 // bfcvtn v17.4h, v3.4s\n" + ".inst 0x0ea16850 // bfcvtn v16.4h, v2.4s\n" + "add x9, x9, #0x18\n" + "str d21, [x27, #0x0]\n" + ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n" + ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n" + "str d20, [x27, #0x8]\n" + ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n" + ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n" + "str d19, [x27, #0x10]\n" + ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n" + ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n" + "add x27, x27, #0x18\n" + "str d18, [x26, #0x0]\n" + ".inst 0x0ea16835 // bfcvtn v21.4h, v1.4s\n" + ".inst 0x0ea16814 // bfcvtn v20.4h, v0.4s\n" + "str d17, [x26, #0x8]\n" + ".inst 0x0ea16bf3 // bfcvtn v19.4h, v31.4s\n" + ".inst 0x0ea16bd2 // bfcvtn v18.4h, v30.4s\n" + "str d16, [x26, #0x10]\n" + ".inst 0x0ea16bb1 // bfcvtn v17.4h, v29.4s\n" + ".inst 0x0ea16b90 // bfcvtn v16.4h, v28.4s\n" + "add x26, x26, #0x18\n" + "str d27, [x25, #0x0]\n" + "str d26, [x25, #0x8]\n" + "str d25, [x25, #0x10]\n" + "add 
x25, x25, #0x18\n" + "str d24, [x24, #0x0]\n" + "str d23, [x24, #0x8]\n" + "str d22, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "str d21, [x23, #0x0]\n" + "str d20, [x23, #0x8]\n" + "str d19, [x23, #0x10]\n" + "add x23, x23, #0x18\n" + "str d18, [x22, #0x0]\n" + "str d17, [x22, #0x8]\n" + "str d16, [x22, #0x10]\n" + "add x22, x22, #0x18\n" + "bge 51b\n" + "54:" // Initial: Height 7: no full blocks + "cbz x10, 57f\n" + "mov x20, %x[in_ptr]\n" + "55:" // Initial: Height 7: Single loop + "movi v23.16b, #0x0\n" + "cbz %x[bias], 56f\n" + "ldr h16, [x28, #0x0]\n" + "shll v23.4s, v16.4h, #0x10\n" + "56:" // Initial: Height 7: Scalar: no bias + "ldr s16, [%x[in_ptr], #0x0]\n" + "ldr s21, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "ldr s20, [%x[in_ptr], #0x60]\n" + "ldr s19, [%x[in_ptr], #0x90]\n" + "ldr s18, [%x[in_ptr], #0xc0]\n" + "ldr s17, [%x[in_ptr], #0xf0]\n" + "ldr s22, [%x[in_ptr], #0x120]\n" + "fadd v16.4s, v16.4s, v23.4s\n" + "fadd v21.4s, v21.4s, v23.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v20.4s, v20.4s, v23.4s\n" + "fadd v19.4s, v19.4s, v23.4s\n" + "fadd v18.4s, v18.4s, v23.4s\n" + "fadd v17.4s, v17.4s, v23.4s\n" + "fadd v22.4s, v22.4s, v23.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "str h21, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h20, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h19, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h18, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "str h17, [x23, #0x0]\n" + "add x23, x23, #0x2\n" + "str h16, [x22, #0x0]\n" + "add x22, x22, #0x2\n" + "bne 55b\n" + "add %x[in_ptr], x20, #0x180\n" + "57:" // Initial: Height 7: no oddments + "b 108f\n" + "58:" // Initial: Height 8 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "mov x28, %x[bias]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x23, x24, %x[ldout], LSL #1\n" + "add x22, x23, %x[ldout], LSL #1\n" + "add x21, x22, %x[ldout], LSL #1\n" + "blt 62f\n" + "59:" // Initial: Height 8: Block loop + "cbnz %x[bias], 60f\n" + "movi v10.16b, #0x0\n" + "movi v9.16b, #0x0\n" + "movi v8.16b, #0x0\n" + "b 61f\n" + "60:" // Initial: Height 8: Width 3: bias + "ldr d18, [x28, #0x0]\n" + "ldr d17, [x28, #0x8]\n" + "ldr d16, [x28, #0x10]\n" + "shll v10.4s, v18.4h, #0x10\n" + "shll v9.4s, v17.4h, #0x10\n" + "shll v8.4s, v16.4h, #0x10\n" + "61:" // Initial: Height 8: Width 3: init done + "ldr q18, [%x[in_ptr], #0x0]\n" + "ldr q17, [%x[in_ptr], #0x10]\n" + "sub x10, x10, #0xc\n" + "add x28, x28, #0x18\n" + "ldr q16, [%x[in_ptr], #0x20]\n" + "ldr q22, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q21, [%x[in_ptr], 
#0x40]\n" + "ldr q20, [%x[in_ptr], #0x50]\n" + "ldr q19, [%x[in_ptr], #0x60]\n" + "ldr q7, [%x[in_ptr], #0x70]\n" + "fadd v18.4s, v18.4s, v10.4s\n" + "fadd v17.4s, v17.4s, v9.4s\n" + "ldr q6, [%x[in_ptr], #0x80]\n" + "ldr q5, [%x[in_ptr], #0x90]\n" + "fadd v16.4s, v16.4s, v8.4s\n" + "fadd v22.4s, v22.4s, v10.4s\n" + "ldr q29, [%x[in_ptr], #0xa0]\n" + "ldr q28, [%x[in_ptr], #0xb0]\n" + "fadd v21.4s, v21.4s, v9.4s\n" + "fadd v20.4s, v20.4s, v8.4s\n" + "ldr q27, [%x[in_ptr], #0xc0]\n" + "ldr q26, [%x[in_ptr], #0xd0]\n" + "fadd v19.4s, v19.4s, v10.4s\n" + "fadd v7.4s, v7.4s, v9.4s\n" + "ldr q25, [%x[in_ptr], #0xe0]\n" + "ldr q24, [%x[in_ptr], #0xf0]\n" + "fadd v6.4s, v6.4s, v8.4s\n" + "fadd v5.4s, v5.4s, v10.4s\n" + "ldr q23, [%x[in_ptr], #0x100]\n" + "ldr q4, [%x[in_ptr], #0x110]\n" + "fadd v29.4s, v29.4s, v9.4s\n" + "fadd v28.4s, v28.4s, v8.4s\n" + "ldr q3, [%x[in_ptr], #0x120]\n" + "ldr q2, [%x[in_ptr], #0x130]\n" + "fadd v27.4s, v27.4s, v10.4s\n" + "fadd v26.4s, v26.4s, v9.4s\n" + "ldr q1, [%x[in_ptr], #0x140]\n" + "ldr q0, [%x[in_ptr], #0x150]\n" + "fadd v25.4s, v25.4s, v8.4s\n" + "fadd v24.4s, v24.4s, v10.4s\n" + "ldr q31, [%x[in_ptr], #0x160]\n" + "ldr q30, [%x[in_ptr], #0x170]\n" + "fadd v23.4s, v23.4s, v9.4s\n" + "fadd v4.4s, v4.4s, v8.4s\n" + "fadd v3.4s, v3.4s, v10.4s\n" + "fadd v2.4s, v2.4s, v9.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v1.4s, v1.4s, v8.4s\n" + "fadd v0.4s, v0.4s, v10.4s\n" + "fadd v31.4s, v31.4s, v9.4s\n" + "fadd v30.4s, v30.4s, v8.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v7.4s, v7.4s, v13.4s\n" + "fmin v6.4s, v6.4s, v13.4s\n" + "fmin v5.4s, v5.4s, v13.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v4.4s, v4.4s, v13.4s\n" + "fmin v3.4s, v3.4s, v13.4s\n" + "fmin v2.4s, v2.4s, v13.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v7.4s, v7.4s, v12.4s\n" + "fmax v6.4s, v6.4s, v12.4s\n" + "fmax v5.4s, v5.4s, v12.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v4.4s, v4.4s, v12.4s\n" + "fmax v3.4s, v3.4s, v12.4s\n" + "fmax v2.4s, v2.4s, v12.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + "str d18, [x9, #0x0]\n" + "str d17, [x9, #0x8]\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea168f2 // bfcvtn v18.4h, v7.4s\n" + "str d16, [x9, 
#0x10]\n" + ".inst 0x0ea168d1 // bfcvtn v17.4h, v6.4s\n" + ".inst 0x0ea168b0 // bfcvtn v16.4h, v5.4s\n" + "add x9, x9, #0x18\n" + "str d22, [x27, #0x0]\n" + ".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n" + ".inst 0x0ea16b9c // bfcvtn v28.4h, v28.4s\n" + "str d21, [x27, #0x8]\n" + ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n" + ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n" + "str d20, [x27, #0x10]\n" + ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n" + ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n" + "add x27, x27, #0x18\n" + "str d19, [x26, #0x0]\n" + ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n" + ".inst 0x0ea16896 // bfcvtn v22.4h, v4.4s\n" + "str d18, [x26, #0x8]\n" + ".inst 0x0ea16875 // bfcvtn v21.4h, v3.4s\n" + ".inst 0x0ea16854 // bfcvtn v20.4h, v2.4s\n" + "str d17, [x26, #0x10]\n" + ".inst 0x0ea16833 // bfcvtn v19.4h, v1.4s\n" + ".inst 0x0ea16812 // bfcvtn v18.4h, v0.4s\n" + "add x26, x26, #0x18\n" + "str d16, [x25, #0x0]\n" + ".inst 0x0ea16bf1 // bfcvtn v17.4h, v31.4s\n" + ".inst 0x0ea16bd0 // bfcvtn v16.4h, v30.4s\n" + "str d29, [x25, #0x8]\n" + "str d28, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d27, [x24, #0x0]\n" + "str d26, [x24, #0x8]\n" + "str d25, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "str d24, [x23, #0x0]\n" + "str d23, [x23, #0x8]\n" + "str d22, [x23, #0x10]\n" + "add x23, x23, #0x18\n" + "str d21, [x22, #0x0]\n" + "str d20, [x22, #0x8]\n" + "str d19, [x22, #0x10]\n" + "add x22, x22, #0x18\n" + "str d18, [x21, #0x0]\n" + "str d17, [x21, #0x8]\n" + "str d16, [x21, #0x10]\n" + "add x21, x21, #0x18\n" + "bge 59b\n" + "62:" // Initial: Height 8: no full blocks + "cbz x10, 65f\n" + "mov x20, %x[in_ptr]\n" + "63:" // Initial: Height 8: Single loop + "movi v24.16b, #0x0\n" + "cbz %x[bias], 64f\n" + "ldr h16, [x28, #0x0]\n" + "shll v24.4s, v16.4h, #0x10\n" + "64:" // Initial: Height 8: Scalar: no bias + "ldr s17, [%x[in_ptr], #0x0]\n" + "ldr s16, [%x[in_ptr], #0x30]\n" + "subs x10, x10, #0x1\n" + "add x28, x28, #0x2\n" + "ldr s21, [%x[in_ptr], #0x60]\n" + "ldr s20, [%x[in_ptr], #0x90]\n" + "ldr s19, [%x[in_ptr], #0xc0]\n" + "ldr s18, [%x[in_ptr], #0xf0]\n" + "ldr s23, [%x[in_ptr], #0x120]\n" + "ldr s22, [%x[in_ptr], #0x150]\n" + "fadd v17.4s, v17.4s, v24.4s\n" + "fadd v16.4s, v16.4s, v24.4s\n" + "fadd v21.4s, v21.4s, v24.4s\n" + "fadd v20.4s, v20.4s, v24.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v19.4s, v19.4s, v24.4s\n" + "fadd v18.4s, v18.4s, v24.4s\n" + "fadd v23.4s, v23.4s, v24.4s\n" + "fadd v22.4s, v22.4s, v24.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v16.4s, v16.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v16.4s, v16.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n" + ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + "str h17, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h16, [x27, #0x0]\n" + ".inst 0x0ea16af1 // bfcvtn v17.4h, v23.4s\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "add x27, x27, #0x2\n" + "str h21, [x26, #0x0]\n" + 
"add x26, x26, #0x2\n" + "str h20, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h19, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "str h18, [x23, #0x0]\n" + "add x23, x23, #0x2\n" + "str h17, [x22, #0x0]\n" + "add x22, x22, #0x2\n" + "str h16, [x21, #0x0]\n" + "add x21, x21, #0x2\n" + "bne 63b\n" + "add %x[in_ptr], x20, #0x180\n" + "65:" // Initial: Height 8: no oddments + "subs %x[rows], %x[rows], #0x8\n" + "add %x[out_ptr], %x[out_ptr], x11\n" + "bgt 1b\n" + "b 108f\n" + "66:" // Accumulate + "67:" // Accumulate: Row loop + "cmp %x[rows], #0x7\n" + "bgt 103f\n" + "beq 98f\n" + "cmp %x[rows], #0x5\n" + "bgt 93f\n" + "beq 88f\n" + "cmp %x[rows], #0x3\n" + "bgt 83f\n" + "beq 78f\n" + "cmp %x[rows], #0x1\n" + "bgt 73f\n" + "68:" // Accumulate: Height 1 + "mov x10, %x[cols]\n" + "mov x9, %x[out_ptr]\n" + "cmp x10, #0xc\n" + "blt 70f\n" + "69:" // Accumulate: Height 1: Block loop + "ldr d16, [x9, #0x0]\n" + "ldr q19, [%x[in_ptr], #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr q18, [%x[in_ptr], #0x10]\n" + "ldr q17, [%x[in_ptr], #0x20]\n" + "cmp x10, #0xc\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + ".inst 0x0ea16a70 // bfcvtn v16.4h, v19.4s\n" + "str d16, [x9, #0x0]\n" + "ldr d16, [x9, #0x8]\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v18.4s, v18.4s, v16.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + ".inst 0x0ea16a50 // bfcvtn v16.4h, v18.4s\n" + "str d16, [x9, #0x8]\n" + "ldr d16, [x9, #0x10]\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v17.4s, v17.4s, v16.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + ".inst 0x0ea16a30 // bfcvtn v16.4h, v17.4s\n" + "str d16, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + "bge 69b\n" + "70:" // Accumulate: Height 1: no full blocks + "cbz x10, 72f\n" + "mov x20, %x[in_ptr]\n" + "71:" // Accumulate: Height 1: Single loop + "ldr h16, [x9, #0x0]\n" + "ldr s17, [%x[in_ptr], #0x0]\n" + "subs x10, x10, #0x1\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v17.4s, v17.4s, v16.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + ".inst 0x0ea16a30 // bfcvtn v16.4h, v17.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "bne 71b\n" + "add %x[in_ptr], x20, #0x180\n" + "72:" // Accumulate: Height 1: no oddments + "b 108f\n" + "73:" // Accumulate: Height 2 + "mov x10, %x[cols]\n" + "mov x9, %x[out_ptr]\n" + "cmp x10, #0xc\n" + "add x27, x9, %x[ldout], LSL #1\n" + "blt 75f\n" + "74:" // Accumulate: Height 2: Block loop + "ldr d17, [x9, #0x0]\n" + "ldr d16, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr q23, [%x[in_ptr], #0x0]\n" + "ldr q22, [%x[in_ptr], #0x30]\n" + "cmp x10, #0xc\n" + "ldr q21, [%x[in_ptr], #0x10]\n" + "ldr q20, [%x[in_ptr], #0x40]\n" + "ldr q19, [%x[in_ptr], #0x20]\n" + "ldr q18, [%x[in_ptr], #0x50]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v23.4s, v23.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16af0 // bfcvtn v16.4h, v23.4s\n" + ".inst 0x0ea16ad1 // bfcvtn v17.4h, v22.4s\n" + "str d16, [x9, #0x0]\n" + "ldr d16, [x9, #0x8]\n" + "str d17, [x27, #0x0]\n" + "shll v17.4s, v16.4h, #0x10\n" + "ldr d16, [x27, #0x8]\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v21.4s, v21.4s, v17.4s\n" + "fadd 
v20.4s, v20.4s, v16.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + ".inst 0x0ea16a91 // bfcvtn v17.4h, v20.4s\n" + "str d16, [x9, #0x8]\n" + "ldr d16, [x9, #0x10]\n" + "str d17, [x27, #0x8]\n" + "shll v17.4s, v16.4h, #0x10\n" + "ldr d16, [x27, #0x10]\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v19.4s, v19.4s, v17.4s\n" + "fadd v18.4s, v18.4s, v16.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + ".inst 0x0ea16a71 // bfcvtn v17.4h, v19.4s\n" + ".inst 0x0ea16a50 // bfcvtn v16.4h, v18.4s\n" + "str d17, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + "str d16, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "bge 74b\n" + "75:" // Accumulate: Height 2: no full blocks + "cbz x10, 77f\n" + "mov x20, %x[in_ptr]\n" + "76:" // Accumulate: Height 2: Single loop + "ldr h17, [x9, #0x0]\n" + "ldr h16, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr s19, [%x[in_ptr], #0x0]\n" + "ldr s18, [%x[in_ptr], #0x30]\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v19.4s, v19.4s, v17.4s\n" + "fadd v18.4s, v18.4s, v16.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + ".inst 0x0ea16a70 // bfcvtn v16.4h, v19.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16a50 // bfcvtn v16.4h, v18.4s\n" + "str h16, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "bne 76b\n" + "add %x[in_ptr], x20, #0x180\n" + "77:" // Accumulate: Height 2: no oddments + "b 108f\n" + "78:" // Accumulate: Height 3 + "mov x10, %x[cols]\n" + "mov x9, %x[out_ptr]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "blt 80f\n" + "79:" // Accumulate: Height 3: Block loop + "ldr d18, [x9, #0x0]\n" + "ldr d17, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr d16, [x26, #0x0]\n" + "ldr q27, [%x[in_ptr], #0x0]\n" + "cmp x10, #0xc\n" + "ldr q26, [%x[in_ptr], #0x30]\n" + "ldr q25, [%x[in_ptr], #0x60]\n" + "ldr q24, [%x[in_ptr], #0x10]\n" + "ldr q23, [%x[in_ptr], #0x40]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "ldr q22, [%x[in_ptr], #0x70]\n" + "ldr q21, [%x[in_ptr], #0x20]\n" + "shll v16.4s, v16.4h, #0x10\n" + "ldr q20, [%x[in_ptr], #0x50]\n" + "ldr q19, [%x[in_ptr], #0x80]\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fadd v27.4s, v27.4s, v18.4s\n" + "fadd v26.4s, v26.4s, v17.4s\n" + "fadd v25.4s, v25.4s, v16.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + ".inst 0x0ea16b72 // bfcvtn v18.4h, v27.4s\n" + ".inst 0x0ea16b50 // bfcvtn v16.4h, v26.4s\n" + ".inst 0x0ea16b31 // bfcvtn v17.4h, v25.4s\n" + "str d18, [x9, #0x0]\n" + "str d16, [x27, #0x0]\n" + "ldr d16, [x9, #0x8]\n" + "str d17, [x26, #0x0]\n" + "ldr d17, [x27, #0x8]\n" + "shll v18.4s, v16.4h, #0x10\n" + "ldr d16, [x26, #0x8]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v24.4s, v24.4s, v18.4s\n" + "fadd v23.4s, v23.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax 
v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16b10 // bfcvtn v16.4h, v24.4s\n" + ".inst 0x0ea16af2 // bfcvtn v18.4h, v23.4s\n" + "str d16, [x9, #0x8]\n" + ".inst 0x0ea16ad1 // bfcvtn v17.4h, v22.4s\n" + "ldr d16, [x9, #0x10]\n" + "str d18, [x27, #0x8]\n" + "str d17, [x26, #0x8]\n" + "shll v18.4s, v16.4h, #0x10\n" + "ldr d17, [x27, #0x10]\n" + "ldr d16, [x26, #0x10]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v21.4s, v21.4s, v18.4s\n" + "fadd v20.4s, v20.4s, v17.4s\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + ".inst 0x0ea16a91 // bfcvtn v17.4h, v20.4s\n" + "str d16, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + ".inst 0x0ea16a70 // bfcvtn v16.4h, v19.4s\n" + "str d17, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "str d16, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "bge 79b\n" + "80:" // Accumulate: Height 3: no full blocks + "cbz x10, 82f\n" + "mov x20, %x[in_ptr]\n" + "81:" // Accumulate: Height 3: Single loop + "ldr h18, [x9, #0x0]\n" + "ldr h17, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr h16, [x26, #0x0]\n" + "ldr s21, [%x[in_ptr], #0x0]\n" + "ldr s20, [%x[in_ptr], #0x30]\n" + "ldr s19, [%x[in_ptr], #0x60]\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v21.4s, v21.4s, v18.4s\n" + "fadd v20.4s, v20.4s, v17.4s\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + ".inst 0x0ea16a91 // bfcvtn v17.4h, v20.4s\n" + "str h16, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + ".inst 0x0ea16a70 // bfcvtn v16.4h, v19.4s\n" + "str h17, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h16, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "bne 81b\n" + "add %x[in_ptr], x20, #0x180\n" + "82:" // Accumulate: Height 3: no oddments + "b 108f\n" + "83:" // Accumulate: Height 4 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "blt 85f\n" + "84:" // Accumulate: Height 4: Block loop + "ldr d19, [x9, #0x0]\n" + "ldr d18, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr d17, [x26, #0x0]\n" + "ldr d16, [x25, #0x0]\n" + "cmp x10, #0xc\n" + "ldr q31, [%x[in_ptr], #0x0]\n" + "ldr q30, [%x[in_ptr], #0x30]\n" + "ldr q29, [%x[in_ptr], #0x60]\n" + "ldr q28, [%x[in_ptr], #0x90]\n" + "shll v19.4s, v19.4h, #0x10\n" + "shll v18.4s, v18.4h, #0x10\n" + "ldr q27, [%x[in_ptr], #0x10]\n" + "ldr q26, [%x[in_ptr], #0x40]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "ldr q25, [%x[in_ptr], #0x70]\n" + "ldr q24, [%x[in_ptr], #0xa0]\n" + "ldr q23, [%x[in_ptr], #0x20]\n" + "ldr q22, [%x[in_ptr], #0x50]\n" + "fadd v31.4s, v31.4s, v19.4s\n" + "fadd v30.4s, v30.4s, v18.4s\n" + "ldr q21, [%x[in_ptr], #0x80]\n" + "ldr q20, [%x[in_ptr], #0xb0]\n" + "fadd v29.4s, v29.4s, v17.4s\n" + "fadd v28.4s, v28.4s, v16.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmax v31.4s, 
v31.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + ".inst 0x0ea16bf3 // bfcvtn v19.4h, v31.4s\n" + ".inst 0x0ea16bd0 // bfcvtn v16.4h, v30.4s\n" + ".inst 0x0ea16bb2 // bfcvtn v18.4h, v29.4s\n" + ".inst 0x0ea16b91 // bfcvtn v17.4h, v28.4s\n" + "str d19, [x9, #0x0]\n" + "str d16, [x27, #0x0]\n" + "ldr d16, [x9, #0x8]\n" + "str d18, [x26, #0x0]\n" + "str d17, [x25, #0x0]\n" + "ldr d18, [x27, #0x8]\n" + "shll v19.4s, v16.4h, #0x10\n" + "ldr d17, [x26, #0x8]\n" + "ldr d16, [x25, #0x8]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v27.4s, v27.4s, v19.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v26.4s, v26.4s, v18.4s\n" + "fadd v25.4s, v25.4s, v17.4s\n" + "fadd v24.4s, v24.4s, v16.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + ".inst 0x0ea16b71 // bfcvtn v17.4h, v27.4s\n" + ".inst 0x0ea16b53 // bfcvtn v19.4h, v26.4s\n" + ".inst 0x0ea16b30 // bfcvtn v16.4h, v25.4s\n" + "str d17, [x9, #0x8]\n" + ".inst 0x0ea16b12 // bfcvtn v18.4h, v24.4s\n" + "ldr d17, [x9, #0x10]\n" + "str d19, [x27, #0x8]\n" + "str d16, [x26, #0x8]\n" + "ldr d16, [x27, #0x10]\n" + "str d18, [x25, #0x8]\n" + "shll v19.4s, v17.4h, #0x10\n" + "ldr d17, [x26, #0x10]\n" + "shll v18.4s, v16.4h, #0x10\n" + "ldr d16, [x25, #0x10]\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v23.4s, v23.4s, v19.4s\n" + "fadd v22.4s, v22.4s, v18.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v21.4s, v21.4s, v17.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fadd v20.4s, v20.4s, v16.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + ".inst 0x0ea16af1 // bfcvtn v17.4h, v23.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "str d17, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + ".inst 0x0ea16ab1 // bfcvtn v17.4h, v21.4s\n" + "str d16, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + ".inst 0x0ea16a90 // bfcvtn v16.4h, v20.4s\n" + "str d17, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d16, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "bge 84b\n" + "85:" // Accumulate: Height 4: no full blocks + "cbz x10, 87f\n" + "mov x20, %x[in_ptr]\n" + "86:" // Accumulate: Height 4: Single loop + "ldr h19, [x9, #0x0]\n" + "ldr h18, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr h17, [x26, #0x0]\n" + "ldr h16, [x25, #0x0]\n" + "ldr s23, [%x[in_ptr], #0x0]\n" + "ldr s22, [%x[in_ptr], #0x30]\n" + "ldr s21, [%x[in_ptr], #0x60]\n" + "ldr s20, [%x[in_ptr], #0x90]\n" + "shll v19.4s, v19.4h, #0x10\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v23.4s, v23.4s, v19.4s\n" + "fadd v22.4s, v22.4s, v18.4s\n" + "fadd v21.4s, v21.4s, v17.4s\n" + "fadd v20.4s, v20.4s, v16.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + ".inst 0x0ea16af3 // bfcvtn v19.4h, v23.4s\n" + ".inst 0x0ea16ad2 // bfcvtn v18.4h, v22.4s\n" + ".inst 0x0ea16ab1 // bfcvtn 
v17.4h, v21.4s\n" + ".inst 0x0ea16a90 // bfcvtn v16.4h, v20.4s\n" + "str h19, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h18, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h17, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h16, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "bne 86b\n" + "add %x[in_ptr], x20, #0x180\n" + "87:" // Accumulate: Height 4: no oddments + "b 108f\n" + "88:" // Accumulate: Height 5 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x24, x25, %x[ldout], LSL #1\n" + "blt 90f\n" + "89:" // Accumulate: Height 5: Block loop + "ldr d20, [x9, #0x0]\n" + "ldr d19, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr d18, [x26, #0x0]\n" + "ldr d17, [x25, #0x0]\n" + "cmp x10, #0xc\n" + "ldr d16, [x24, #0x0]\n" + "ldr q3, [%x[in_ptr], #0x0]\n" + "ldr q2, [%x[in_ptr], #0x30]\n" + "ldr q1, [%x[in_ptr], #0x60]\n" + "shll v20.4s, v20.4h, #0x10\n" + "shll v19.4s, v19.4h, #0x10\n" + "ldr q0, [%x[in_ptr], #0x90]\n" + "ldr q31, [%x[in_ptr], #0xc0]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "ldr q30, [%x[in_ptr], #0x10]\n" + "ldr q29, [%x[in_ptr], #0x40]\n" + "shll v16.4s, v16.4h, #0x10\n" + "ldr q28, [%x[in_ptr], #0x70]\n" + "ldr q27, [%x[in_ptr], #0xa0]\n" + "fadd v3.4s, v3.4s, v20.4s\n" + "fadd v2.4s, v2.4s, v19.4s\n" + "ldr q26, [%x[in_ptr], #0xd0]\n" + "ldr q25, [%x[in_ptr], #0x20]\n" + "fadd v1.4s, v1.4s, v18.4s\n" + "fadd v0.4s, v0.4s, v17.4s\n" + "ldr q24, [%x[in_ptr], #0x50]\n" + "ldr q23, [%x[in_ptr], #0x80]\n" + "fadd v31.4s, v31.4s, v16.4s\n" + "ldr q22, [%x[in_ptr], #0xb0]\n" + "ldr q21, [%x[in_ptr], #0xe0]\n" + "fmin v3.4s, v3.4s, v13.4s\n" + "fmin v2.4s, v2.4s, v13.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmax v3.4s, v3.4s, v12.4s\n" + "fmax v2.4s, v2.4s, v12.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + ".inst 0x0ea16874 // bfcvtn v20.4h, v3.4s\n" + ".inst 0x0ea16853 // bfcvtn v19.4h, v2.4s\n" + ".inst 0x0ea16831 // bfcvtn v17.4h, v1.4s\n" + ".inst 0x0ea16810 // bfcvtn v16.4h, v0.4s\n" + ".inst 0x0ea16bf2 // bfcvtn v18.4h, v31.4s\n" + "str d20, [x9, #0x0]\n" + "str d19, [x27, #0x0]\n" + "str d17, [x26, #0x0]\n" + "ldr d17, [x9, #0x8]\n" + "str d16, [x25, #0x0]\n" + "ldr d16, [x27, #0x8]\n" + "str d18, [x24, #0x0]\n" + "ldr d18, [x26, #0x8]\n" + "shll v20.4s, v17.4h, #0x10\n" + "ldr d17, [x25, #0x8]\n" + "shll v19.4s, v16.4h, #0x10\n" + "ldr d16, [x24, #0x8]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v30.4s, v30.4s, v20.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v29.4s, v29.4s, v19.4s\n" + "fadd v28.4s, v28.4s, v18.4s\n" + "fadd v27.4s, v27.4s, v17.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fadd v26.4s, v26.4s, v16.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + ".inst 0x0ea16bd2 // bfcvtn v18.4h, v30.4s\n" + ".inst 0x0ea16bb3 // bfcvtn v19.4h, v29.4s\n" + ".inst 0x0ea16b91 // bfcvtn v17.4h, v28.4s\n" + ".inst 0x0ea16b70 // bfcvtn v16.4h, v27.4s\n" + "str d18, [x9, #0x8]\n" + ".inst 0x0ea16b52 // bfcvtn v18.4h, v26.4s\n" + "str d19, [x27, #0x8]\n" + 
"str d17, [x26, #0x8]\n" + "ldr d17, [x9, #0x10]\n" + "str d16, [x25, #0x8]\n" + "ldr d16, [x27, #0x10]\n" + "str d18, [x24, #0x8]\n" + "ldr d18, [x26, #0x10]\n" + "shll v20.4s, v17.4h, #0x10\n" + "ldr d17, [x25, #0x10]\n" + "shll v19.4s, v16.4h, #0x10\n" + "ldr d16, [x24, #0x10]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v25.4s, v25.4s, v20.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v24.4s, v24.4s, v19.4s\n" + "fadd v23.4s, v23.4s, v18.4s\n" + "fadd v22.4s, v22.4s, v17.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fadd v21.4s, v21.4s, v16.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + ".inst 0x0ea16b30 // bfcvtn v16.4h, v25.4s\n" + ".inst 0x0ea16b13 // bfcvtn v19.4h, v24.4s\n" + ".inst 0x0ea16af2 // bfcvtn v18.4h, v23.4s\n" + ".inst 0x0ea16ad1 // bfcvtn v17.4h, v22.4s\n" + "str d16, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + "str d19, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "str d18, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d17, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d16, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "bge 89b\n" + "90:" // Accumulate: Height 5: no full blocks + "cbz x10, 92f\n" + "mov x20, %x[in_ptr]\n" + "91:" // Accumulate: Height 5: Single loop + "ldr h20, [x9, #0x0]\n" + "ldr h19, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr h18, [x26, #0x0]\n" + "ldr h17, [x25, #0x0]\n" + "ldr h16, [x24, #0x0]\n" + "ldr s25, [%x[in_ptr], #0x0]\n" + "ldr s24, [%x[in_ptr], #0x30]\n" + "ldr s23, [%x[in_ptr], #0x60]\n" + "shll v20.4s, v20.4h, #0x10\n" + "shll v19.4s, v19.4h, #0x10\n" + "ldr s22, [%x[in_ptr], #0x90]\n" + "ldr s21, [%x[in_ptr], #0xc0]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v25.4s, v25.4s, v20.4s\n" + "fadd v24.4s, v24.4s, v19.4s\n" + "fadd v23.4s, v23.4s, v18.4s\n" + "fadd v22.4s, v22.4s, v17.4s\n" + "fadd v21.4s, v21.4s, v16.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmin v21.4s, v21.4s, v13.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + "fmax v21.4s, v21.4s, v12.4s\n" + ".inst 0x0ea16b34 // bfcvtn v20.4h, v25.4s\n" + ".inst 0x0ea16b13 // bfcvtn v19.4h, v24.4s\n" + ".inst 0x0ea16af2 // bfcvtn v18.4h, v23.4s\n" + ".inst 0x0ea16ad1 // bfcvtn v17.4h, v22.4s\n" + ".inst 0x0ea16ab0 // bfcvtn v16.4h, v21.4s\n" + "str h20, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h19, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h18, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h17, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h16, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "bne 91b\n" + "add %x[in_ptr], x20, #0x180\n" + "92:" // Accumulate: Height 5: no oddments + "b 108f\n" + "93:" // Accumulate: Height 6 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x23, x24, %x[ldout], LSL #1\n" + "blt 95f\n" + "94:" // Accumulate: Height 6: Block loop + 
"ldr d21, [x9, #0x0]\n" + "ldr d20, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr d19, [x26, #0x0]\n" + "ldr d18, [x25, #0x0]\n" + "cmp x10, #0xc\n" + "ldr d17, [x24, #0x0]\n" + "ldr d16, [x23, #0x0]\n" + "ldr q6, [%x[in_ptr], #0x0]\n" + "ldr q5, [%x[in_ptr], #0x30]\n" + "shll v22.4s, v21.4h, #0x10\n" + "shll v21.4s, v20.4h, #0x10\n" + "ldr q4, [%x[in_ptr], #0x60]\n" + "ldr q3, [%x[in_ptr], #0x90]\n" + "shll v20.4s, v19.4h, #0x10\n" + "shll v18.4s, v18.4h, #0x10\n" + "ldr q2, [%x[in_ptr], #0xc0]\n" + "ldr q19, [%x[in_ptr], #0xf0]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "ldr q1, [%x[in_ptr], #0x10]\n" + "ldr q0, [%x[in_ptr], #0x40]\n" + "fadd v6.4s, v6.4s, v22.4s\n" + "fadd v5.4s, v5.4s, v21.4s\n" + "ldr q31, [%x[in_ptr], #0x70]\n" + "ldr q30, [%x[in_ptr], #0xa0]\n" + "fadd v4.4s, v4.4s, v20.4s\n" + "fadd v3.4s, v3.4s, v18.4s\n" + "ldr q29, [%x[in_ptr], #0xd0]\n" + "ldr q28, [%x[in_ptr], #0x100]\n" + "fadd v2.4s, v2.4s, v17.4s\n" + "fadd v19.4s, v19.4s, v16.4s\n" + "ldr q27, [%x[in_ptr], #0x20]\n" + "ldr q26, [%x[in_ptr], #0x50]\n" + "fmin v6.4s, v6.4s, v13.4s\n" + "fmin v5.4s, v5.4s, v13.4s\n" + "ldr q25, [%x[in_ptr], #0x80]\n" + "ldr q24, [%x[in_ptr], #0xb0]\n" + "fmin v4.4s, v4.4s, v13.4s\n" + "fmin v3.4s, v3.4s, v13.4s\n" + "ldr q23, [%x[in_ptr], #0xe0]\n" + "ldr q22, [%x[in_ptr], #0x110]\n" + "fmin v2.4s, v2.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmax v6.4s, v6.4s, v12.4s\n" + "fmax v5.4s, v5.4s, v12.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fmax v4.4s, v4.4s, v12.4s\n" + "fmax v3.4s, v3.4s, v12.4s\n" + "fmax v2.4s, v2.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + ".inst 0x0ea168d5 // bfcvtn v21.4h, v6.4s\n" + ".inst 0x0ea168b4 // bfcvtn v20.4h, v5.4s\n" + ".inst 0x0ea16892 // bfcvtn v18.4h, v4.4s\n" + ".inst 0x0ea16871 // bfcvtn v17.4h, v3.4s\n" + ".inst 0x0ea16850 // bfcvtn v16.4h, v2.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + "str d21, [x9, #0x0]\n" + "str d20, [x27, #0x0]\n" + "str d18, [x26, #0x0]\n" + "ldr d18, [x9, #0x8]\n" + "str d17, [x25, #0x0]\n" + "ldr d17, [x27, #0x8]\n" + "str d16, [x24, #0x0]\n" + "ldr d16, [x26, #0x8]\n" + "str d19, [x23, #0x0]\n" + "shll v21.4s, v18.4h, #0x10\n" + "ldr d18, [x25, #0x8]\n" + "shll v20.4s, v17.4h, #0x10\n" + "ldr d17, [x24, #0x8]\n" + "shll v19.4s, v16.4h, #0x10\n" + "ldr d16, [x23, #0x8]\n" + "shll v18.4s, v18.4h, #0x10\n" + "fadd v1.4s, v1.4s, v21.4s\n" + "fadd v0.4s, v0.4s, v20.4s\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v31.4s, v31.4s, v19.4s\n" + "fadd v30.4s, v30.4s, v18.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "fadd v29.4s, v29.4s, v17.4s\n" + "fadd v28.4s, v28.4s, v16.4s\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + ".inst 0x0ea16832 // bfcvtn v18.4h, v1.4s\n" + ".inst 0x0ea16810 // bfcvtn v16.4h, v0.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + ".inst 0x0ea16bf4 // bfcvtn v20.4h, v31.4s\n" + ".inst 0x0ea16bd1 // bfcvtn v17.4h, v30.4s\n" + "str d18, [x9, #0x8]\n" + "str d16, [x27, #0x8]\n" + ".inst 0x0ea16bb3 // bfcvtn v19.4h, v29.4s\n" + ".inst 0x0ea16b92 // bfcvtn v18.4h, v28.4s\n" + "ldr d16, [x9, #0x10]\n" + "str d20, [x26, #0x8]\n" + "str d17, [x25, #0x8]\n" + "ldr d17, [x27, #0x10]\n" + "str d19, [x24, #0x8]\n" + "shll v21.4s, v16.4h, 
#0x10\n" + "ldr d16, [x26, #0x10]\n" + "str d18, [x23, #0x8]\n" + "ldr d18, [x25, #0x10]\n" + "shll v20.4s, v17.4h, #0x10\n" + "ldr d17, [x24, #0x10]\n" + "shll v19.4s, v16.4h, #0x10\n" + "fadd v27.4s, v27.4s, v21.4s\n" + "ldr d16, [x23, #0x10]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v26.4s, v26.4s, v20.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v25.4s, v25.4s, v19.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fadd v24.4s, v24.4s, v18.4s\n" + "fadd v23.4s, v23.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + ".inst 0x0ea16b71 // bfcvtn v17.4h, v27.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16b50 // bfcvtn v16.4h, v26.4s\n" + "str d17, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + ".inst 0x0ea16b33 // bfcvtn v19.4h, v25.4s\n" + ".inst 0x0ea16b12 // bfcvtn v18.4h, v24.4s\n" + ".inst 0x0ea16af1 // bfcvtn v17.4h, v23.4s\n" + "str d16, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "str d19, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d18, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d17, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "str d16, [x23, #0x10]\n" + "add x23, x23, #0x18\n" + "bge 94b\n" + "95:" // Accumulate: Height 6: no full blocks + "cbz x10, 97f\n" + "mov x20, %x[in_ptr]\n" + "96:" // Accumulate: Height 6: Single loop + "ldr h21, [x9, #0x0]\n" + "ldr h20, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr h19, [x26, #0x0]\n" + "ldr h18, [x25, #0x0]\n" + "ldr h17, [x24, #0x0]\n" + "ldr h16, [x23, #0x0]\n" + "ldr s27, [%x[in_ptr], #0x0]\n" + "ldr s26, [%x[in_ptr], #0x30]\n" + "shll v21.4s, v21.4h, #0x10\n" + "shll v20.4s, v20.4h, #0x10\n" + "ldr s25, [%x[in_ptr], #0x60]\n" + "ldr s24, [%x[in_ptr], #0x90]\n" + "shll v19.4s, v19.4h, #0x10\n" + "shll v18.4s, v18.4h, #0x10\n" + "ldr s23, [%x[in_ptr], #0xc0]\n" + "ldr s22, [%x[in_ptr], #0xf0]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v27.4s, v27.4s, v21.4s\n" + "fadd v26.4s, v26.4s, v20.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v25.4s, v25.4s, v19.4s\n" + "fadd v24.4s, v24.4s, v18.4s\n" + "fadd v23.4s, v23.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16b75 // bfcvtn v21.4h, v27.4s\n" + ".inst 0x0ea16b54 // bfcvtn v20.4h, v26.4s\n" + ".inst 0x0ea16b33 // bfcvtn v19.4h, v25.4s\n" + ".inst 0x0ea16b12 // bfcvtn v18.4h, v24.4s\n" + ".inst 0x0ea16af1 // bfcvtn v17.4h, v23.4s\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "str h21, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h20, [x27, #0x0]\n" + "add x27, x27, #0x2\n" + "str h19, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h18, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h17, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "str h16, [x23, #0x0]\n" + "add x23, x23, #0x2\n" + "bne 96b\n" + "add 
%x[in_ptr], x20, #0x180\n" + "97:" // Accumulate: Height 6: no oddments + "b 108f\n" + "98:" // Accumulate: Height 7 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "add x23, x24, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x22, x23, %x[ldout], LSL #1\n" + "blt 100f\n" + "99:" // Accumulate: Height 7: Block loop + "ldr d22, [x9, #0x0]\n" + "ldr d21, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr d20, [x26, #0x0]\n" + "ldr d19, [x25, #0x0]\n" + "cmp x10, #0xc\n" + "ldr d18, [x24, #0x0]\n" + "ldr d17, [x23, #0x0]\n" + "ldr d16, [x22, #0x0]\n" + "ldr q9, [%x[in_ptr], #0x0]\n" + "shll v24.4s, v22.4h, #0x10\n" + "shll v23.4s, v21.4h, #0x10\n" + "ldr q8, [%x[in_ptr], #0x30]\n" + "ldr q7, [%x[in_ptr], #0x60]\n" + "shll v21.4s, v20.4h, #0x10\n" + "shll v19.4s, v19.4h, #0x10\n" + "ldr q6, [%x[in_ptr], #0x90]\n" + "ldr q5, [%x[in_ptr], #0xc0]\n" + "shll v18.4s, v18.4h, #0x10\n" + "shll v17.4s, v17.4h, #0x10\n" + "ldr q20, [%x[in_ptr], #0xf0]\n" + "ldr q22, [%x[in_ptr], #0x120]\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v9.4s, v9.4s, v24.4s\n" + "ldr q4, [%x[in_ptr], #0x10]\n" + "ldr q3, [%x[in_ptr], #0x40]\n" + "fadd v8.4s, v8.4s, v23.4s\n" + "fadd v7.4s, v7.4s, v21.4s\n" + "ldr q2, [%x[in_ptr], #0x70]\n" + "ldr q1, [%x[in_ptr], #0xa0]\n" + "fadd v6.4s, v6.4s, v19.4s\n" + "fadd v5.4s, v5.4s, v18.4s\n" + "ldr q0, [%x[in_ptr], #0xd0]\n" + "ldr q31, [%x[in_ptr], #0x100]\n" + "fadd v20.4s, v20.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "ldr q30, [%x[in_ptr], #0x130]\n" + "ldr q29, [%x[in_ptr], #0x20]\n" + "fmin v9.4s, v9.4s, v13.4s\n" + "fmin v8.4s, v8.4s, v13.4s\n" + "ldr q28, [%x[in_ptr], #0x50]\n" + "ldr q27, [%x[in_ptr], #0x80]\n" + "fmin v7.4s, v7.4s, v13.4s\n" + "fmin v6.4s, v6.4s, v13.4s\n" + "ldr q26, [%x[in_ptr], #0xb0]\n" + "ldr q25, [%x[in_ptr], #0xe0]\n" + "fmin v5.4s, v5.4s, v13.4s\n" + "fmin v20.4s, v20.4s, v13.4s\n" + "ldr q24, [%x[in_ptr], #0x110]\n" + "ldr q23, [%x[in_ptr], #0x140]\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v9.4s, v9.4s, v12.4s\n" + "fmax v8.4s, v8.4s, v12.4s\n" + "fmax v7.4s, v7.4s, v12.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fmax v6.4s, v6.4s, v12.4s\n" + "fmax v5.4s, v5.4s, v12.4s\n" + "fmax v20.4s, v20.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16935 // bfcvtn v21.4h, v9.4s\n" + ".inst 0x0ea16913 // bfcvtn v19.4h, v8.4s\n" + ".inst 0x0ea168f0 // bfcvtn v16.4h, v7.4s\n" + ".inst 0x0ea168d2 // bfcvtn v18.4h, v6.4s\n" + ".inst 0x0ea168b1 // bfcvtn v17.4h, v5.4s\n" + ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n" + "str d21, [x9, #0x0]\n" + "str d19, [x27, #0x0]\n" + ".inst 0x0ea16ad3 // bfcvtn v19.4h, v22.4s\n" + "str d16, [x26, #0x0]\n" + "ldr d16, [x9, #0x8]\n" + "str d18, [x25, #0x0]\n" + "ldr d18, [x27, #0x8]\n" + "str d17, [x24, #0x0]\n" + "ldr d17, [x26, #0x8]\n" + "str d20, [x23, #0x0]\n" + "shll v22.4s, v16.4h, #0x10\n" + "ldr d16, [x25, #0x8]\n" + "str d19, [x22, #0x0]\n" + "shll v21.4s, v18.4h, #0x10\n" + "ldr d18, [x24, #0x8]\n" + "shll v20.4s, v17.4h, #0x10\n" + "ldr d17, [x23, #0x8]\n" + "shll v19.4s, v16.4h, #0x10\n" + "fadd v4.4s, v4.4s, v22.4s\n" + "ldr d16, [x22, #0x8]\n" + "shll v18.4s, v18.4h, #0x10\n" + "fadd v3.4s, v3.4s, v21.4s\n" + "fadd v2.4s, v2.4s, v20.4s\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v1.4s, v1.4s, v19.4s\n" + "fadd v0.4s, v0.4s, v18.4s\n" + "fmin v4.4s, v4.4s, v13.4s\n" + "fadd v31.4s, v31.4s, v17.4s\n" 
+ "fmin v3.4s, v3.4s, v13.4s\n" + "fadd v30.4s, v30.4s, v16.4s\n" + "fmin v2.4s, v2.4s, v13.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmax v4.4s, v4.4s, v12.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmax v3.4s, v3.4s, v12.4s\n" + "fmax v2.4s, v2.4s, v12.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + ".inst 0x0ea16893 // bfcvtn v19.4h, v4.4s\n" + ".inst 0x0ea16875 // bfcvtn v21.4h, v3.4s\n" + ".inst 0x0ea16850 // bfcvtn v16.4h, v2.4s\n" + ".inst 0x0ea16832 // bfcvtn v18.4h, v1.4s\n" + ".inst 0x0ea16811 // bfcvtn v17.4h, v0.4s\n" + "str d19, [x9, #0x8]\n" + ".inst 0x0ea16bf4 // bfcvtn v20.4h, v31.4s\n" + ".inst 0x0ea16bd3 // bfcvtn v19.4h, v30.4s\n" + "str d21, [x27, #0x8]\n" + "str d16, [x26, #0x8]\n" + "ldr d16, [x9, #0x10]\n" + "str d18, [x25, #0x8]\n" + "ldr d18, [x27, #0x10]\n" + "str d17, [x24, #0x8]\n" + "ldr d17, [x26, #0x10]\n" + "str d20, [x23, #0x8]\n" + "shll v22.4s, v16.4h, #0x10\n" + "ldr d16, [x25, #0x10]\n" + "str d19, [x22, #0x8]\n" + "shll v21.4s, v18.4h, #0x10\n" + "ldr d18, [x24, #0x10]\n" + "shll v20.4s, v17.4h, #0x10\n" + "ldr d17, [x23, #0x10]\n" + "shll v19.4s, v16.4h, #0x10\n" + "fadd v29.4s, v29.4s, v22.4s\n" + "ldr d16, [x22, #0x10]\n" + "shll v18.4s, v18.4h, #0x10\n" + "fadd v28.4s, v28.4s, v21.4s\n" + "fadd v27.4s, v27.4s, v20.4s\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v26.4s, v26.4s, v19.4s\n" + "fadd v25.4s, v25.4s, v18.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fadd v24.4s, v24.4s, v17.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fadd v23.4s, v23.4s, v16.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + ".inst 0x0ea16bb0 // bfcvtn v16.4h, v29.4s\n" + ".inst 0x0ea16b95 // bfcvtn v21.4h, v28.4s\n" + ".inst 0x0ea16b74 // bfcvtn v20.4h, v27.4s\n" + ".inst 0x0ea16b53 // bfcvtn v19.4h, v26.4s\n" + ".inst 0x0ea16b32 // bfcvtn v18.4h, v25.4s\n" + "str d16, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + ".inst 0x0ea16b11 // bfcvtn v17.4h, v24.4s\n" + ".inst 0x0ea16af0 // bfcvtn v16.4h, v23.4s\n" + "str d21, [x27, #0x10]\n" + "add x27, x27, #0x18\n" + "str d20, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d19, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d18, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "str d17, [x23, #0x10]\n" + "add x23, x23, #0x18\n" + "str d16, [x22, #0x10]\n" + "add x22, x22, #0x18\n" + "bge 99b\n" + "100:" // Accumulate: Height 7: no full blocks + "cbz x10, 102f\n" + "mov x20, %x[in_ptr]\n" + "101:" // Accumulate: Height 7: Single loop + "ldr h22, [x9, #0x0]\n" + "ldr h21, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr h20, [x26, #0x0]\n" + "ldr h19, [x25, #0x0]\n" + "ldr h18, [x24, #0x0]\n" + "ldr h17, [x23, #0x0]\n" + "ldr h16, [x22, #0x0]\n" + "ldr s29, [%x[in_ptr], #0x0]\n" + "shll v28.4s, v22.4h, #0x10\n" + "shll v27.4s, v21.4h, #0x10\n" + "ldr s26, [%x[in_ptr], #0x30]\n" + "ldr s25, [%x[in_ptr], #0x60]\n" + "shll v21.4s, v20.4h, #0x10\n" + "shll v20.4s, v19.4h, #0x10\n" + "ldr s24, [%x[in_ptr], #0x90]\n" + "ldr s23, [%x[in_ptr], #0xc0]\n" + "shll v19.4s, v18.4h, #0x10\n" + "shll v18.4s, 
v17.4h, #0x10\n" + "ldr s17, [%x[in_ptr], #0xf0]\n" + "ldr s22, [%x[in_ptr], #0x120]\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v29.4s, v29.4s, v28.4s\n" + "fadd v26.4s, v26.4s, v27.4s\n" + "fadd v25.4s, v25.4s, v21.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v24.4s, v24.4s, v20.4s\n" + "fadd v23.4s, v23.4s, v19.4s\n" + "fadd v17.4s, v17.4s, v18.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v17.4s, v17.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v17.4s, v17.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16bb5 // bfcvtn v21.4h, v29.4s\n" + ".inst 0x0ea16b50 // bfcvtn v16.4h, v26.4s\n" + ".inst 0x0ea16b34 // bfcvtn v20.4h, v25.4s\n" + ".inst 0x0ea16b13 // bfcvtn v19.4h, v24.4s\n" + ".inst 0x0ea16af2 // bfcvtn v18.4h, v23.4s\n" + ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n" + "str h21, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h16, [x27, #0x0]\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "add x27, x27, #0x2\n" + "str h20, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h19, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h18, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "str h17, [x23, #0x0]\n" + "add x23, x23, #0x2\n" + "str h16, [x22, #0x0]\n" + "add x22, x22, #0x2\n" + "bne 101b\n" + "add %x[in_ptr], x20, #0x180\n" + "102:" // Accumulate: Height 7: no oddments + "b 108f\n" + "103:" // Accumulate: Height 8 + "mov x9, %x[out_ptr]\n" + "mov x10, %x[cols]\n" + "add x27, x9, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "add x23, x24, %x[ldout], LSL #1\n" + "cmp x10, #0xc\n" + "add x22, x23, %x[ldout], LSL #1\n" + "add x21, x22, %x[ldout], LSL #1\n" + "blt 105f\n" + "104:" // Accumulate: Height 8: Block loop + "ldr d23, [x9, #0x0]\n" + "ldr d22, [x27, #0x0]\n" + "sub x10, x10, #0xc\n" + "ldr d21, [x26, #0x0]\n" + "ldr d20, [x25, #0x0]\n" + "cmp x10, #0xc\n" + "ldr d19, [x24, #0x0]\n" + "ldr d18, [x23, #0x0]\n" + "ldr d17, [x22, #0x0]\n" + "ldr d16, [x21, #0x0]\n" + "shll v26.4s, v23.4h, #0x10\n" + "shll v25.4s, v22.4h, #0x10\n" + "ldr q11, [%x[in_ptr], #0x0]\n" + "ldr q10, [%x[in_ptr], #0x30]\n" + "shll v24.4s, v21.4h, #0x10\n" + "shll v23.4s, v20.4h, #0x10\n" + "ldr q9, [%x[in_ptr], #0x60]\n" + "ldr q8, [%x[in_ptr], #0x90]\n" + "shll v21.4s, v19.4h, #0x10\n" + "shll v20.4s, v18.4h, #0x10\n" + "ldr q18, [%x[in_ptr], #0xc0]\n" + "ldr q19, [%x[in_ptr], #0xf0]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "ldr q7, [%x[in_ptr], #0x120]\n" + "ldr q22, [%x[in_ptr], #0x150]\n" + "fadd v11.4s, v11.4s, v26.4s\n" + "fadd v10.4s, v10.4s, v25.4s\n" + "ldr q6, [%x[in_ptr], #0x10]\n" + "ldr q5, [%x[in_ptr], #0x40]\n" + "fadd v9.4s, v9.4s, v24.4s\n" + "fadd v8.4s, v8.4s, v23.4s\n" + "ldr q4, [%x[in_ptr], #0x70]\n" + "ldr q3, [%x[in_ptr], #0xa0]\n" + "fadd v18.4s, v18.4s, v21.4s\n" + "fadd v19.4s, v19.4s, v20.4s\n" + "ldr q2, [%x[in_ptr], #0xd0]\n" + "ldr q1, [%x[in_ptr], #0x100]\n" + "fadd v7.4s, v7.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "ldr q0, [%x[in_ptr], #0x130]\n" + "ldr q31, [%x[in_ptr], #0x160]\n" + "fmin v11.4s, v11.4s, v13.4s\n" + "fmin v10.4s, v10.4s, v13.4s\n" + "ldr q30, [%x[in_ptr], #0x20]\n" 
+ "ldr q29, [%x[in_ptr], #0x50]\n" + "fmin v9.4s, v9.4s, v13.4s\n" + "fmin v8.4s, v8.4s, v13.4s\n" + "ldr q28, [%x[in_ptr], #0x80]\n" + "ldr q27, [%x[in_ptr], #0xb0]\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "ldr q26, [%x[in_ptr], #0xe0]\n" + "ldr q25, [%x[in_ptr], #0x110]\n" + "fmin v7.4s, v7.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "ldr q24, [%x[in_ptr], #0x140]\n" + "ldr q23, [%x[in_ptr], #0x170]\n" + "fmax v11.4s, v11.4s, v12.4s\n" + "fmax v10.4s, v10.4s, v12.4s\n" + "fmax v9.4s, v9.4s, v12.4s\n" + "fmax v8.4s, v8.4s, v12.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x180\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v7.4s, v7.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + ".inst 0x0ea16975 // bfcvtn v21.4h, v11.4s\n" + ".inst 0x0ea16954 // bfcvtn v20.4h, v10.4s\n" + ".inst 0x0ea16931 // bfcvtn v17.4h, v9.4s\n" + ".inst 0x0ea16910 // bfcvtn v16.4h, v8.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + "str d21, [x9, #0x0]\n" + "str d20, [x27, #0x0]\n" + ".inst 0x0ea168f5 // bfcvtn v21.4h, v7.4s\n" + ".inst 0x0ea16ad4 // bfcvtn v20.4h, v22.4s\n" + "str d17, [x26, #0x0]\n" + "ldr d17, [x9, #0x8]\n" + "str d16, [x25, #0x0]\n" + "ldr d16, [x27, #0x8]\n" + "str d18, [x24, #0x0]\n" + "ldr d18, [x26, #0x8]\n" + "str d19, [x23, #0x0]\n" + "shll v19.4s, v17.4h, #0x10\n" + "ldr d17, [x25, #0x8]\n" + "str d21, [x22, #0x0]\n" + "shll v22.4s, v16.4h, #0x10\n" + "ldr d16, [x24, #0x8]\n" + "str d20, [x21, #0x0]\n" + "shll v21.4s, v18.4h, #0x10\n" + "ldr d18, [x23, #0x8]\n" + "shll v20.4s, v17.4h, #0x10\n" + "fadd v6.4s, v6.4s, v19.4s\n" + "ldr d17, [x22, #0x8]\n" + "shll v19.4s, v16.4h, #0x10\n" + "fadd v5.4s, v5.4s, v22.4s\n" + "ldr d16, [x21, #0x8]\n" + "shll v18.4s, v18.4h, #0x10\n" + "fadd v4.4s, v4.4s, v21.4s\n" + "fadd v3.4s, v3.4s, v20.4s\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v2.4s, v2.4s, v19.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v1.4s, v1.4s, v18.4s\n" + "fmin v6.4s, v6.4s, v13.4s\n" + "fmin v5.4s, v5.4s, v13.4s\n" + "fadd v0.4s, v0.4s, v17.4s\n" + "fmin v4.4s, v4.4s, v13.4s\n" + "fadd v31.4s, v31.4s, v16.4s\n" + "fmin v3.4s, v3.4s, v13.4s\n" + "fmin v2.4s, v2.4s, v13.4s\n" + "fmin v1.4s, v1.4s, v13.4s\n" + "fmin v0.4s, v0.4s, v13.4s\n" + "fmax v6.4s, v6.4s, v12.4s\n" + "fmin v31.4s, v31.4s, v13.4s\n" + "fmax v5.4s, v5.4s, v12.4s\n" + "fmax v4.4s, v4.4s, v12.4s\n" + "fmax v3.4s, v3.4s, v12.4s\n" + "fmax v2.4s, v2.4s, v12.4s\n" + "fmax v1.4s, v1.4s, v12.4s\n" + "fmax v0.4s, v0.4s, v12.4s\n" + "fmax v31.4s, v31.4s, v12.4s\n" + ".inst 0x0ea168d5 // bfcvtn v21.4h, v6.4s\n" + ".inst 0x0ea168b4 // bfcvtn v20.4h, v5.4s\n" + ".inst 0x0ea16891 // bfcvtn v17.4h, v4.4s\n" + ".inst 0x0ea16870 // bfcvtn v16.4h, v3.4s\n" + ".inst 0x0ea16852 // bfcvtn v18.4h, v2.4s\n" + ".inst 0x0ea16833 // bfcvtn v19.4h, v1.4s\n" + "str d21, [x9, #0x8]\n" + "str d20, [x27, #0x8]\n" + ".inst 0x0ea16815 // bfcvtn v21.4h, v0.4s\n" + ".inst 0x0ea16bf4 // bfcvtn v20.4h, v31.4s\n" + "str d17, [x26, #0x8]\n" + "ldr d17, [x9, #0x10]\n" + "str d16, [x25, #0x8]\n" + "ldr d16, [x27, #0x10]\n" + "str d18, [x24, #0x8]\n" + "ldr d18, [x26, #0x10]\n" + "str d19, [x23, #0x8]\n" + "shll v19.4s, v17.4h, #0x10\n" + "ldr d17, [x25, #0x10]\n" + "str d21, [x22, #0x8]\n" + "shll v22.4s, v16.4h, #0x10\n" + "ldr d16, [x24, #0x10]\n" + "str d20, [x21, #0x8]\n" + "shll v21.4s, v18.4h, #0x10\n" + "ldr d18, [x23, #0x10]\n" + "shll v20.4s, v17.4h, #0x10\n" + "fadd v30.4s, v30.4s, v19.4s\n" + "ldr d17, [x22, #0x10]\n" 
+ "shll v19.4s, v16.4h, #0x10\n" + "fadd v29.4s, v29.4s, v22.4s\n" + "ldr d16, [x21, #0x10]\n" + "shll v18.4s, v18.4h, #0x10\n" + "fadd v28.4s, v28.4s, v21.4s\n" + "fadd v27.4s, v27.4s, v20.4s\n" + "shll v17.4s, v17.4h, #0x10\n" + "fadd v26.4s, v26.4s, v19.4s\n" + "shll v16.4s, v16.4h, #0x10\n" + "fadd v25.4s, v25.4s, v18.4s\n" + "fmin v30.4s, v30.4s, v13.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fadd v24.4s, v24.4s, v17.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fadd v23.4s, v23.4s, v16.4s\n" + "fmin v27.4s, v27.4s, v13.4s\n" + "fmin v26.4s, v26.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmax v30.4s, v30.4s, v12.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v27.4s, v27.4s, v12.4s\n" + "fmax v26.4s, v26.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + ".inst 0x0ea16bd1 // bfcvtn v17.4h, v30.4s\n" + ".inst 0x0ea16bb0 // bfcvtn v16.4h, v29.4s\n" + ".inst 0x0ea16b95 // bfcvtn v21.4h, v28.4s\n" + ".inst 0x0ea16b74 // bfcvtn v20.4h, v27.4s\n" + ".inst 0x0ea16b53 // bfcvtn v19.4h, v26.4s\n" + ".inst 0x0ea16b32 // bfcvtn v18.4h, v25.4s\n" + "str d17, [x9, #0x10]\n" + "add x9, x9, #0x18\n" + "str d16, [x27, #0x10]\n" + ".inst 0x0ea16b11 // bfcvtn v17.4h, v24.4s\n" + ".inst 0x0ea16af0 // bfcvtn v16.4h, v23.4s\n" + "add x27, x27, #0x18\n" + "str d21, [x26, #0x10]\n" + "add x26, x26, #0x18\n" + "str d20, [x25, #0x10]\n" + "add x25, x25, #0x18\n" + "str d19, [x24, #0x10]\n" + "add x24, x24, #0x18\n" + "str d18, [x23, #0x10]\n" + "add x23, x23, #0x18\n" + "str d17, [x22, #0x10]\n" + "add x22, x22, #0x18\n" + "str d16, [x21, #0x10]\n" + "add x21, x21, #0x18\n" + "bge 104b\n" + "105:" // Accumulate: Height 8: no full blocks + "cbz x10, 107f\n" + "mov x20, %x[in_ptr]\n" + "106:" // Accumulate: Height 8: Single loop + "ldr h23, [x9, #0x0]\n" + "ldr h22, [x27, #0x0]\n" + "subs x10, x10, #0x1\n" + "ldr h21, [x26, #0x0]\n" + "ldr h20, [x25, #0x0]\n" + "ldr h19, [x24, #0x0]\n" + "ldr h18, [x23, #0x0]\n" + "ldr h17, [x22, #0x0]\n" + "ldr h16, [x21, #0x0]\n" + "shll v31.4s, v23.4h, #0x10\n" + "shll v30.4s, v22.4h, #0x10\n" + "ldr s29, [%x[in_ptr], #0x0]\n" + "ldr s28, [%x[in_ptr], #0x30]\n" + "shll v27.4s, v21.4h, #0x10\n" + "shll v26.4s, v20.4h, #0x10\n" + "ldr s25, [%x[in_ptr], #0x60]\n" + "ldr s24, [%x[in_ptr], #0x90]\n" + "shll v21.4s, v19.4h, #0x10\n" + "shll v20.4s, v18.4h, #0x10\n" + "ldr s19, [%x[in_ptr], #0xc0]\n" + "ldr s18, [%x[in_ptr], #0xf0]\n" + "shll v17.4s, v17.4h, #0x10\n" + "shll v16.4s, v16.4h, #0x10\n" + "ldr s23, [%x[in_ptr], #0x120]\n" + "ldr s22, [%x[in_ptr], #0x150]\n" + "fadd v29.4s, v29.4s, v31.4s\n" + "fadd v28.4s, v28.4s, v30.4s\n" + "fadd v25.4s, v25.4s, v27.4s\n" + "fadd v24.4s, v24.4s, v26.4s\n" + "add %x[in_ptr], %x[in_ptr], #0x4\n" + "fadd v19.4s, v19.4s, v21.4s\n" + "fadd v18.4s, v18.4s, v20.4s\n" + "fadd v23.4s, v23.4s, v17.4s\n" + "fadd v22.4s, v22.4s, v16.4s\n" + "fmin v29.4s, v29.4s, v13.4s\n" + "fmin v28.4s, v28.4s, v13.4s\n" + "fmin v25.4s, v25.4s, v13.4s\n" + "fmin v24.4s, v24.4s, v13.4s\n" + "fmin v19.4s, v19.4s, v13.4s\n" + "fmin v18.4s, v18.4s, v13.4s\n" + "fmin v23.4s, v23.4s, v13.4s\n" + "fmin v22.4s, v22.4s, v13.4s\n" + "fmax v29.4s, v29.4s, v12.4s\n" + "fmax v28.4s, v28.4s, v12.4s\n" + "fmax v25.4s, v25.4s, v12.4s\n" + "fmax v24.4s, v24.4s, v12.4s\n" + "fmax v19.4s, v19.4s, v12.4s\n" + "fmax v18.4s, v18.4s, v12.4s\n" + "fmax v23.4s, v23.4s, v12.4s\n" + "fmax v22.4s, v22.4s, v12.4s\n" + 
".inst 0x0ea16bb1 // bfcvtn v17.4h, v29.4s\n" + ".inst 0x0ea16b90 // bfcvtn v16.4h, v28.4s\n" + ".inst 0x0ea16b35 // bfcvtn v21.4h, v25.4s\n" + ".inst 0x0ea16b14 // bfcvtn v20.4h, v24.4s\n" + ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n" + ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n" + "str h17, [x9, #0x0]\n" + "add x9, x9, #0x2\n" + "str h16, [x27, #0x0]\n" + ".inst 0x0ea16af1 // bfcvtn v17.4h, v23.4s\n" + ".inst 0x0ea16ad0 // bfcvtn v16.4h, v22.4s\n" + "add x27, x27, #0x2\n" + "str h21, [x26, #0x0]\n" + "add x26, x26, #0x2\n" + "str h20, [x25, #0x0]\n" + "add x25, x25, #0x2\n" + "str h19, [x24, #0x0]\n" + "add x24, x24, #0x2\n" + "str h18, [x23, #0x0]\n" + "add x23, x23, #0x2\n" + "str h17, [x22, #0x0]\n" + "add x22, x22, #0x2\n" + "str h16, [x21, #0x0]\n" + "add x21, x21, #0x2\n" + "bne 106b\n" + "add %x[in_ptr], x20, #0x180\n" + "107:" // Accumulate: Height 8: no oddments + "subs %x[rows], %x[rows], #0x8\n" + "add %x[out_ptr], %x[out_ptr], x11\n" + "bgt 67b\n" + "108:" // Exit + : [in_ptr] "+&r" (in_ptr), [out_ptr] "+&r" (out_ptr), [rows] "+&r" (rows) + : [accumulate] "r" (accumulate), [bias] "r" (bias), [cols] "r" (cols), [ldout] "r" (ldout), [maxval] "r" (maxval), [minval] "r" (minval) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); +} + + +#endif // __aarch64__ diff --git a/src/core/NEON/kernels/arm_gemm/merges/list-sve.hpp b/src/core/NEON/kernels/arm_gemm/merges/list-sve.hpp index aded4b3b8c..d11740e5c8 100644 --- a/src/core/NEON/kernels/arm_gemm/merges/list-sve.hpp +++ b/src/core/NEON/kernels/arm_gemm/merges/list-sve.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021 Arm Limited. + * Copyright (c) 2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -24,5 +24,6 @@ #include "sve_merge_fp16_3VLx8.hpp" #include "sve_merge_fp32_3VLx8.hpp" +#include "sve_merge_fp32_bf16_8x3VL.hpp" #include "sve_merge_s32_3VLx8.hpp" #include "sve_merge_u32_3VLx8.hpp" \ No newline at end of file diff --git a/src/core/NEON/kernels/arm_gemm/merges/list.hpp b/src/core/NEON/kernels/arm_gemm/merges/list.hpp index 3443c6f0a8..fd6be5b69b 100644 --- a/src/core/NEON/kernels/arm_gemm/merges/list.hpp +++ b/src/core/NEON/kernels/arm_gemm/merges/list.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2020, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -23,6 +23,7 @@ */ #include "a32_merge_float_8x6.hpp" #include "a64_merge_fp32_12x8.hpp" +#include "a64_merge_fp32_bf16_8x12.hpp" #include "a64_merge_s32_12x8.hpp" #include "a64_merge_s32_4x4.hpp" #include "a64_merge_u32_12x8.hpp" diff --git a/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_bf16_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_bf16_8x3VL.hpp new file mode 100644 index 0000000000..5d4a8bf347 --- /dev/null +++ b/src/core/NEON/kernels/arm_gemm/merges/sve_merge_fp32_bf16_8x3VL.hpp @@ -0,0 +1,2137 @@ +/* + * Copyright (c) 2024 Arm Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#pragma once +#ifdef ARM_COMPUTE_ENABLE_SVE + +template<> +void MergeResults<3, 8, true>( + bfloat16 *out_ptr, + const float * in_ptr, + const int ldout, + const int y0, const int ymax, + const int x0, const int xmax, + const bfloat16 *bias, + Activation act, + bool accumulate) +{ + float maxval = static_cast<float>(std::numeric_limits<float>::infinity()); + float minval = - static_cast<float>(std::numeric_limits<float>::infinity()); + + switch(act.type) { + default: + case Activation::Type::None: + break; + case Activation::Type::BoundedReLU: + maxval = static_cast<float>(act.param1); + /* fall through */ + case Activation::Type::ReLU: + minval = 0; + break; + } + + size_t rows = ymax-y0; + size_t cols = xmax-x0; + + out_ptr += (y0 * ldout) + x0; + bias = (bias == nullptr) ?
nullptr : bias + x0; + + __asm__ __volatile__( + "ptrue p3.b\n" + "cbz %x[cols], 52f\n" + "cbz %x[rows], 52f\n" + "mov x12, #0x20\n" + "dup z12.s, %w[maxval]\n" + "dup z11.s, %w[minval]\n" + "mul x12, %x[ldout], x12\n" + "cbnz %x[accumulate], 34f\n" + "1:" // Initial: Row loop + "cmp %x[rows], #0x7\n" + "bgt 30f\n" + "beq 26f\n" + "cmp %x[rows], #0x5\n" + "bgt 22f\n" + "beq 18f\n" + "cmp %x[rows], #0x3\n" + "bgt 14f\n" + "beq 10f\n" + "cmp %x[rows], #0x1\n" + "bgt 6f\n" + "2:" // Initial: Height 1 + "mov x11, %x[cols]\n" + "mov x10, %x[out_ptr]\n" + "mov x9, %x[bias]\n" + "3:" // Initial: Height 1: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 4f\n" + "mov z21.b, #0x0\n" + "mov z20.b, #0x0\n" + "mov z19.b, #0x0\n" + "b 5f\n" + "4:" // Initial: Height 1: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z21.s, z18.s, #0x10\n" + "lsl z20.s, z17.s, #0x10\n" + "lsl z19.s, z16.s, #0x10\n" + "5:" // Initial: Height 1: Width 3: init done + "ld1w { z17.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z16.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z18.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "fadd z17.s, z17.s, z21.s\n" + "fadd z16.s, z16.s, z20.s\n" + "cmp x11, XZR\n" + "fadd z18.s, z18.s, z19.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fmin z16.s, p3/M, z16.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + "st1h { z17.s }, p2, [x10]\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aae50 // bfcvt z16.h, p3/M, z18.s\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + "bgt 3b\n" + "b 52f\n" + "6:" // Initial: Height 2 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "7:" // Initial: Height 2: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 8f\n" + "mov z24.b, #0x0\n" + "mov z23.b, #0x0\n" + "mov z22.b, #0x0\n" + "b 9f\n" + "8:" // Initial: Height 2: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z24.s, z18.s, #0x10\n" + "lsl z23.s, z17.s, #0x10\n" + "lsl z22.s, z16.s, #0x10\n" + "9:" // Initial: Height 2: Width 3: init done + "ld1w { z17.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z16.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z19.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z18.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z21.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z20.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "fadd z17.s, z17.s, z24.s\n" + "fadd z16.s, z16.s, z23.s\n" + "cmp x11, XZR\n" + "fadd z19.s, z19.s, z22.s\n" + "fadd z18.s, z18.s, z24.s\n" + "fadd z21.s, z21.s, z23.s\n" + "fadd z20.s, z20.s, z22.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fmin z16.s, p3/M, z16.s, 
z12.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "st1h { z17.s }, p2, [x10]\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aaeb1 // bfcvt z17.h, p3/M, z21.s\n" + ".inst 0x658aae90 // bfcvt z16.h, p3/M, z20.s\n" + "st1h { z19.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x28]\n" + "st1h { z17.s }, p1, [x28, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + "bgt 7b\n" + "b 52f\n" + "10:" // Initial: Height 3 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "11:" // Initial: Height 3: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 12f\n" + "mov z27.b, #0x0\n" + "mov z26.b, #0x0\n" + "mov z25.b, #0x0\n" + "b 13f\n" + "12:" // Initial: Height 3: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z27.s, z18.s, #0x10\n" + "lsl z26.s, z17.s, #0x10\n" + "lsl z25.s, z16.s, #0x10\n" + "13:" // Initial: Height 3: Width 3: init done + "ld1w { z18.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z17.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z16.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z21.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z20.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z19.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z23.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z18.s, z18.s, z27.s\n" + "fadd z17.s, z17.s, z26.s\n" + "ld1w { z22.s }, p0/Z, [x20, #-8, MUL VL]\n" + "fadd z16.s, z16.s, z25.s\n" + "fadd z21.s, z21.s, z27.s\n" + "cmp x11, XZR\n" + "fadd z20.s, z20.s, z26.s\n" + "fadd z19.s, z19.s, z25.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "fadd z24.s, z24.s, z27.s\n" + "fadd z23.s, z23.s, z26.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fadd z22.s, z22.s, z25.s\n" + "fmin z16.s, p3/M, z16.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 
0x658aae94 // bfcvt z20.h, p3/M, z20.s\n" + "st1h { z18.s }, p2, [x10]\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aaf12 // bfcvt z18.h, p3/M, z24.s\n" + "st1h { z17.s }, p1, [x10, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aaef1 // bfcvt z17.h, p3/M, z23.s\n" + ".inst 0x658aaed0 // bfcvt z16.h, p3/M, z22.s\n" + "inch x10, ALL, MUL #3\n" + "st1h { z21.s }, p2, [x28]\n" + "st1h { z20.s }, p1, [x28, #1, MUL VL]\n" + "st1h { z19.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x27]\n" + "st1h { z17.s }, p1, [x27, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "bgt 11b\n" + "b 52f\n" + "14:" // Initial: Height 4 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "15:" // Initial: Height 4: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 16f\n" + "mov z30.b, #0x0\n" + "mov z29.b, #0x0\n" + "mov z28.b, #0x0\n" + "b 17f\n" + "16:" // Initial: Height 4: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z30.s, z18.s, #0x10\n" + "lsl z29.s, z17.s, #0x10\n" + "lsl z28.s, z16.s, #0x10\n" + "17:" // Initial: Height 4: Width 3: init done + "ld1w { z18.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z17.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z16.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z23.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z22.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z21.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z20.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z18.s, z18.s, z30.s\n" + "fadd z17.s, z17.s, z29.s\n" + "ld1w { z19.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z27.s }, p2/Z, [x20, #-7, MUL VL]\n" + "fadd z16.s, z16.s, z28.s\n" + "fadd z24.s, z24.s, z30.s\n" + "ld1w { z26.s }, p1/Z, [x20, #-6, MUL VL]\n" + "ld1w { z25.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fadd z23.s, z23.s, z29.s\n" + "fadd z22.s, z22.s, z28.s\n" + "fadd z21.s, z21.s, z30.s\n" + "fadd z20.s, z20.s, z29.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fadd z19.s, z19.s, z28.s\n" + "fadd z27.s, z27.s, z30.s\n" + "fmin z16.s, p3/M, z16.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fadd z26.s, z26.s, z29.s\n" + "fadd z25.s, z25.s, z28.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, 
z16.s\n" + ".inst 0x658aaf18 // bfcvt z24.h, p3/M, z24.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + ".inst 0x658aaef7 // bfcvt z23.h, p3/M, z23.s\n" + ".inst 0x658aaed6 // bfcvt z22.h, p3/M, z22.s\n" + "cmp x11, XZR\n" + "st1h { z18.s }, p2, [x10]\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 0x658aae94 // bfcvt z20.h, p3/M, z20.s\n" + "st1h { z17.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aaf72 // bfcvt z18.h, p3/M, z27.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aaf51 // bfcvt z17.h, p3/M, z26.s\n" + ".inst 0x658aaf30 // bfcvt z16.h, p3/M, z25.s\n" + "inch x10, ALL, MUL #3\n" + "st1h { z24.s }, p2, [x28]\n" + "st1h { z23.s }, p1, [x28, #1, MUL VL]\n" + "st1h { z22.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + "st1h { z21.s }, p2, [x27]\n" + "st1h { z20.s }, p1, [x27, #1, MUL VL]\n" + "st1h { z19.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x26]\n" + "st1h { z17.s }, p1, [x26, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "bgt 15b\n" + "b 52f\n" + "18:" // Initial: Height 5 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "19:" // Initial: Height 5: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 20f\n" + "mov z1.b, #0x0\n" + "mov z0.b, #0x0\n" + "mov z31.b, #0x0\n" + "b 21f\n" + "20:" // Initial: Height 5: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z1.s, z18.s, #0x10\n" + "lsl z0.s, z17.s, #0x10\n" + "lsl z31.s, z16.s, #0x10\n" + "21:" // Initial: Height 5: Width 3: init done + "ld1w { z21.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z20.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z19.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z18.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z17.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z16.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z23.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z21.s, z21.s, z1.s\n" + "fadd z20.s, z20.s, z0.s\n" + "ld1w { z22.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z30.s }, p2/Z, [x20, #-7, MUL VL]\n" + "fadd z19.s, z19.s, z31.s\n" + "fadd z18.s, z18.s, z1.s\n" + "ld1w { z29.s }, p1/Z, [x20, #-6, MUL VL]\n" + "ld1w { z28.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fadd z17.s, z17.s, z0.s\n" + "fadd z16.s, z16.s, z31.s\n" + "ld1w { z27.s }, p2/Z, [x20, #-4, MUL VL]\n" + "ld1w { z26.s }, p1/Z, [x20, #-3, MUL VL]\n" + "fadd z24.s, z24.s, z1.s\n" + "fadd z23.s, z23.s, z0.s\n" + "ld1w { z25.s }, p0/Z, [x20, #-2, MUL VL]\n" + "fadd z22.s, z22.s, z31.s\n" + "fadd z30.s, z30.s, z1.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fadd z29.s, z29.s, z0.s\n" + "fadd z28.s, z28.s, z31.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fadd z27.s, z27.s, z1.s\n" + "fadd z26.s, z26.s, z0.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fadd z25.s, z25.s, z31.s\n" + "fmin 
z16.s, p3/M, z16.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 0x658aae94 // bfcvt z20.h, p3/M, z20.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "st1h { z21.s }, p2, [x10]\n" + ".inst 0x658aaf18 // bfcvt z24.h, p3/M, z24.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "cmp x11, XZR\n" + "st1h { z20.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aaef7 // bfcvt z23.h, p3/M, z23.s\n" + "st1h { z19.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aaed6 // bfcvt z22.h, p3/M, z22.s\n" + ".inst 0x658aafd5 // bfcvt z21.h, p3/M, z30.s\n" + "inch x10, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x28]\n" + ".inst 0x658aafb4 // bfcvt z20.h, p3/M, z29.s\n" + ".inst 0x658aaf93 // bfcvt z19.h, p3/M, z28.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z17.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aaf72 // bfcvt z18.h, p3/M, z27.s\n" + ".inst 0x658aaf51 // bfcvt z17.h, p3/M, z26.s\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + ".inst 0x658aaf30 // bfcvt z16.h, p3/M, z25.s\n" + "inch x28, ALL, MUL #3\n" + "st1h { z24.s }, p2, [x27]\n" + "st1h { z23.s }, p1, [x27, #1, MUL VL]\n" + "st1h { z22.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "st1h { z21.s }, p2, [x26]\n" + "st1h { z20.s }, p1, [x26, #1, MUL VL]\n" + "st1h { z19.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x25]\n" + "st1h { z17.s }, p1, [x25, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "bgt 19b\n" + "b 52f\n" + "22:" // Initial: Height 6 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "23:" // Initial: Height 6: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 24f\n" + "mov z4.b, #0x0\n" + "mov z3.b, #0x0\n" + "mov z2.b, #0x0\n" + "b 25f\n" + "24:" // Initial: Height 6: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z4.s, z18.s, #0x10\n" + "lsl z3.s, z17.s, #0x10\n" + "lsl z2.s, z16.s, #0x10\n" + "25:" // Initial: Height 6: Width 3: init done + "ld1w { z17.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z16.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + 
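+ // Per-block bookkeeping: three vector lengths of columns are consumed (decw on x11) while the bf16 bias pointer x9 keeps pace (inch).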
"inch x9, ALL, MUL #3\n" + "ld1w { z21.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z20.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z19.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z18.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z1.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z0.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z17.s, z17.s, z4.s\n" + "fadd z16.s, z16.s, z3.s\n" + "ld1w { z25.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [x20, #-7, MUL VL]\n" + "fadd z21.s, z21.s, z2.s\n" + "fadd z20.s, z20.s, z4.s\n" + "ld1w { z23.s }, p1/Z, [x20, #-6, MUL VL]\n" + "ld1w { z22.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fadd z19.s, z19.s, z3.s\n" + "fadd z18.s, z18.s, z2.s\n" + "ld1w { z31.s }, p2/Z, [x20, #-4, MUL VL]\n" + "ld1w { z30.s }, p1/Z, [x20, #-3, MUL VL]\n" + "fadd z1.s, z1.s, z4.s\n" + "fadd z0.s, z0.s, z3.s\n" + "ld1w { z29.s }, p0/Z, [x20, #-2, MUL VL]\n" + "ld1w { z28.s }, p2/Z, [x20, #-1, MUL VL]\n" + "fadd z25.s, z25.s, z2.s\n" + "fadd z24.s, z24.s, z4.s\n" + "ld1w { z27.s }, p1/Z, [x20]\n" + "ld1w { z26.s }, p0/Z, [x20, #1, MUL VL]\n" + "fadd z23.s, z23.s, z3.s\n" + "fadd z22.s, z22.s, z2.s\n" + "fadd z31.s, z31.s, z4.s\n" + "fadd z30.s, z30.s, z3.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fmin z16.s, p3/M, z16.s, z12.s\n" + "fadd z29.s, z29.s, z2.s\n" + "fadd z28.s, z28.s, z4.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fadd z27.s, z27.s, z3.s\n" + "fadd z26.s, z26.s, z2.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 0x658aae94 // bfcvt z20.h, p3/M, z20.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "st1h { z17.s }, p2, [x10]\n" + ".inst 0x658aac31 // bfcvt z17.h, p3/M, z1.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aac10 // bfcvt z16.h, p3/M, z0.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "st1h { z21.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aaf39 // bfcvt z25.h, p3/M, z25.s\n" + "cmp x11, XZR\n" + "st1h { z20.s }, p2, [x28]\n" + ".inst 0x658aaf18 // bfcvt z24.h, p3/M, z24.s\n" + ".inst 0x658aaef7 // bfcvt z23.h, p3/M, z23.s\n" + "st1h { z19.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aaed6 // bfcvt z22.h, p3/M, z22.s\n" + ".inst 0x658aaff5 // bfcvt z21.h, p3/M, z31.s\n" + "inch x10, ALL, MUL 
#3\n" + "st1h { z18.s }, p0, [x28, #2, MUL VL]\n" + ".inst 0x658aafd4 // bfcvt z20.h, p3/M, z30.s\n" + ".inst 0x658aafb3 // bfcvt z19.h, p3/M, z29.s\n" + "inch x28, ALL, MUL #3\n" + "st1h { z17.s }, p2, [x27]\n" + ".inst 0x658aaf92 // bfcvt z18.h, p3/M, z28.s\n" + ".inst 0x658aaf71 // bfcvt z17.h, p3/M, z27.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z16.s }, p1, [x27, #1, MUL VL]\n" + ".inst 0x658aaf50 // bfcvt z16.h, p3/M, z26.s\n" + "st1h { z25.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "st1h { z24.s }, p2, [x26]\n" + "st1h { z23.s }, p1, [x26, #1, MUL VL]\n" + "st1h { z22.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z21.s }, p2, [x25]\n" + "st1h { z20.s }, p1, [x25, #1, MUL VL]\n" + "st1h { z19.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x24]\n" + "st1h { z17.s }, p1, [x24, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x24, #2, MUL VL]\n" + "inch x24, ALL, MUL #3\n" + "bgt 23b\n" + "b 52f\n" + "26:" // Initial: Height 7 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "add x23, x24, %x[ldout], LSL #1\n" + "27:" // Initial: Height 7: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 28f\n" + "mov z7.b, #0x0\n" + "mov z6.b, #0x0\n" + "mov z5.b, #0x0\n" + "b 29f\n" + "28:" // Initial: Height 7: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z7.s, z18.s, #0x10\n" + "lsl z6.s, z17.s, #0x10\n" + "lsl z5.s, z16.s, #0x10\n" + "29:" // Initial: Height 7: Width 3: init done + "ld1w { z19.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z18.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z17.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z16.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z21.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z20.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z4.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z3.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z19.s, z19.s, z7.s\n" + "fadd z18.s, z18.s, z6.s\n" + "ld1w { z2.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z1.s }, p2/Z, [x20, #-7, MUL VL]\n" + "fadd z17.s, z17.s, z5.s\n" + "fadd z16.s, z16.s, z7.s\n" + "ld1w { z26.s }, p1/Z, [x20, #-6, MUL VL]\n" + "ld1w { z25.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fadd z21.s, z21.s, z6.s\n" + "fadd z20.s, z20.s, z5.s\n" + "ld1w { z24.s }, p2/Z, [x20, #-4, MUL VL]\n" + "ld1w { z23.s }, p1/Z, [x20, #-3, MUL VL]\n" + "fadd z4.s, z4.s, z7.s\n" + "fadd z3.s, z3.s, z6.s\n" + "ld1w { z22.s }, p0/Z, [x20, #-2, MUL VL]\n" + "ld1w { z0.s }, p2/Z, [x20, #-1, MUL VL]\n" + "fadd z2.s, z2.s, z5.s\n" + "fadd z1.s, z1.s, z7.s\n" + "ld1w { z31.s }, p1/Z, [x20]\n" + "ld1w { z30.s }, p0/Z, [x20, #1, MUL VL]\n" + "fadd z26.s, z26.s, z6.s\n" + "fadd z25.s, z25.s, z5.s\n" + "ld1w { z29.s }, p2/Z, [x20, #2, MUL VL]\n" + "ld1w { z28.s }, p1/Z, [x20, #3, MUL VL]\n" + "fadd z24.s, z24.s, z7.s\n" + "fadd z23.s, z23.s, z6.s\n" + "ld1w { z27.s }, p0/Z, [x20, #4, MUL VL]\n" + "fadd z22.s, z22.s, z5.s\n" + "fadd z0.s, z0.s, z7.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fadd z31.s, z31.s, z6.s\n" + "fadd 
z30.s, z30.s, z5.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fadd z29.s, z29.s, z7.s\n" + "fadd z28.s, z28.s, z6.s\n" + "fmin z16.s, p3/M, z16.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fadd z27.s, z27.s, z5.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmin z4.s, p3/M, z4.s, z12.s\n" + "fmin z3.s, p3/M, z3.s, z12.s\n" + "fmin z2.s, p3/M, z2.s, z12.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z4.s, p3/M, z4.s, z11.s\n" + "fmax z3.s, p3/M, z3.s, z11.s\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "fmax z2.s, p3/M, z2.s, z11.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 0x658aae94 // bfcvt z20.h, p3/M, z20.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "st1h { z19.s }, p2, [x10]\n" + ".inst 0x658aac93 // bfcvt z19.h, p3/M, z4.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + "st1h { z18.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aac72 // bfcvt z18.h, p3/M, z3.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "st1h { z17.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aac51 // bfcvt z17.h, p3/M, z2.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "st1h { z16.s }, p2, [x28]\n" + ".inst 0x658aac30 // bfcvt z16.h, p3/M, z1.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "cmp x11, XZR\n" + "st1h { z21.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aaf5a // bfcvt z26.h, p3/M, z26.s\n" + "st1h { z20.s }, p0, [x28, #2, MUL VL]\n" + ".inst 0x658aaf39 // bfcvt z25.h, p3/M, z25.s\n" + ".inst 0x658aaf18 // bfcvt z24.h, p3/M, z24.s\n" + "inch x10, ALL, MUL #3\n" + "st1h { z19.s }, p2, [x27]\n" + ".inst 0x658aaef7 // bfcvt z23.h, p3/M, z23.s\n" + ".inst 0x658aaed6 // bfcvt z22.h, p3/M, z22.s\n" + "inch x28, ALL, MUL #3\n" + "st1h { z18.s }, p1, [x27, #1, MUL VL]\n" + ".inst 0x658aac15 // bfcvt z21.h, p3/M, z0.s\n" + ".inst 0x658aaff4 // bfcvt z20.h, p3/M, z31.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z17.s }, p0, [x27, #2, MUL VL]\n" + ".inst 0x658aafd3 // bfcvt z19.h, p3/M, z30.s\n" + ".inst 0x658aafb2 // bfcvt z18.h, p3/M, z29.s\n" + "inch x27, ALL, MUL #3\n" + "st1h { z16.s }, p2, [x26]\n" + ".inst 0x658aaf91 // bfcvt z17.h, p3/M, z28.s\n" + ".inst 0x658aaf70 // bfcvt z16.h, p3/M, z27.s\n" + "st1h { z26.s }, p1, [x26, #1, MUL VL]\n" + "st1h { z25.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z24.s }, p2, [x25]\n" + "st1h { z23.s }, p1, [x25, #1, MUL VL]\n" + "st1h { z22.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "st1h { z21.s }, p2, [x24]\n" + "st1h { z20.s }, p1, [x24, #1, MUL VL]\n" + 
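+ // bfcvt leaves each bf16 result in the low half of its .s lane, so st1h with .s elements stores exactly those halfwords.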
"st1h { z19.s }, p0, [x24, #2, MUL VL]\n" + "inch x24, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x23]\n" + "st1h { z17.s }, p1, [x23, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x23, #2, MUL VL]\n" + "inch x23, ALL, MUL #3\n" + "bgt 27b\n" + "b 52f\n" + "30:" // Initial: Height 8 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "mov x9, %x[bias]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "add x23, x24, %x[ldout], LSL #1\n" + "add x22, x23, %x[ldout], LSL #1\n" + "31:" // Initial: Height 8: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "whilelt p0.s, x21, x11\n" + "incw x21\n" + "cbnz %x[bias], 32f\n" + "mov z10.b, #0x0\n" + "mov z9.b, #0x0\n" + "mov z8.b, #0x0\n" + "b 33f\n" + "32:" // Initial: Height 8: Width 3: bias + "ld1h { z18.s }, p2/Z, [x9]\n" + "ld1h { z17.s }, p1/Z, [x9, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x9, #2, MUL VL]\n" + "lsl z10.s, z18.s, #0x10\n" + "lsl z9.s, z17.s, #0x10\n" + "lsl z8.s, z16.s, #0x10\n" + "33:" // Initial: Height 8: Width 3: init done + "ld1w { z21.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z20.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "inch x9, ALL, MUL #3\n" + "ld1w { z19.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z18.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z17.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z16.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z7.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z6.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z21.s, z21.s, z10.s\n" + "fadd z20.s, z20.s, z9.s\n" + "ld1w { z5.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z4.s }, p2/Z, [x20, #-7, MUL VL]\n" + "fadd z19.s, z19.s, z8.s\n" + "fadd z18.s, z18.s, z10.s\n" + "ld1w { z3.s }, p1/Z, [x20, #-6, MUL VL]\n" + "ld1w { z2.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fadd z17.s, z17.s, z9.s\n" + "fadd z16.s, z16.s, z8.s\n" + "ld1w { z27.s }, p2/Z, [x20, #-4, MUL VL]\n" + "ld1w { z26.s }, p1/Z, [x20, #-3, MUL VL]\n" + "fadd z7.s, z7.s, z10.s\n" + "fadd z6.s, z6.s, z9.s\n" + "ld1w { z25.s }, p0/Z, [x20, #-2, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [x20, #-1, MUL VL]\n" + "fadd z5.s, z5.s, z8.s\n" + "fadd z4.s, z4.s, z10.s\n" + "ld1w { z23.s }, p1/Z, [x20]\n" + "ld1w { z22.s }, p0/Z, [x20, #1, MUL VL]\n" + "fadd z3.s, z3.s, z9.s\n" + "fadd z2.s, z2.s, z8.s\n" + "ld1w { z1.s }, p2/Z, [x20, #2, MUL VL]\n" + "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n" + "fadd z27.s, z27.s, z10.s\n" + "fadd z26.s, z26.s, z9.s\n" + "ld1w { z31.s }, p0/Z, [x20, #4, MUL VL]\n" + "ld1w { z30.s }, p2/Z, [x20, #5, MUL VL]\n" + "fadd z25.s, z25.s, z8.s\n" + "fadd z24.s, z24.s, z10.s\n" + "ld1w { z29.s }, p1/Z, [x20, #6, MUL VL]\n" + "ld1w { z28.s }, p0/Z, [x20, #7, MUL VL]\n" + "fadd z23.s, z23.s, z9.s\n" + "fadd z22.s, z22.s, z8.s\n" + "fadd z1.s, z1.s, z10.s\n" + "fadd z0.s, z0.s, z9.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fadd z31.s, z31.s, z8.s\n" + "fadd z30.s, z30.s, z10.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fadd z29.s, z29.s, z9.s\n" + "fadd z28.s, z28.s, z8.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fmin z16.s, p3/M, z16.s, z12.s\n" + "fmin z7.s, p3/M, z7.s, z12.s\n" + "fmin z6.s, p3/M, z6.s, z12.s\n" + "fmin z5.s, p3/M, z5.s, z12.s\n" + "fmin z4.s, p3/M, z4.s, z12.s\n" + "fmin z3.s, p3/M, z3.s, z12.s\n" + "fmin 
z2.s, p3/M, z2.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "fmax z16.s, p3/M, z16.s, z11.s\n" + "fmax z7.s, p3/M, z7.s, z11.s\n" + "fmax z6.s, p3/M, z6.s, z11.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 0x658aae94 // bfcvt z20.h, p3/M, z20.s\n" + "fmax z5.s, p3/M, z5.s, z11.s\n" + "fmax z4.s, p3/M, z4.s, z11.s\n" + ".inst 0x658aae73 // bfcvt z19.h, p3/M, z19.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "fmax z3.s, p3/M, z3.s, z11.s\n" + "fmax z2.s, p3/M, z2.s, z11.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + ".inst 0x658aae10 // bfcvt z16.h, p3/M, z16.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "st1h { z21.s }, p2, [x10]\n" + ".inst 0x658aacf5 // bfcvt z21.h, p3/M, z7.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "st1h { z20.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aacd4 // bfcvt z20.h, p3/M, z6.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + "st1h { z19.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aacb3 // bfcvt z19.h, p3/M, z5.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + "st1h { z18.s }, p2, [x28]\n" + ".inst 0x658aac92 // bfcvt z18.h, p3/M, z4.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "st1h { z17.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aac71 // bfcvt z17.h, p3/M, z3.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + ".inst 0x658aac50 // bfcvt z16.h, p3/M, z2.s\n" + "cmp x11, XZR\n" + "st1h { z21.s }, p2, [x27]\n" + ".inst 0x658aaf7b // bfcvt z27.h, p3/M, z27.s\n" + ".inst 0x658aaf5a // bfcvt z26.h, p3/M, z26.s\n" + "st1h { z20.s }, p1, [x27, #1, MUL VL]\n" + ".inst 0x658aaf39 // bfcvt z25.h, p3/M, z25.s\n" + ".inst 0x658aaf18 // bfcvt z24.h, p3/M, z24.s\n" + "inch x10, ALL, MUL #3\n" + "st1h { z19.s }, p0, [x27, #2, MUL VL]\n" + ".inst 0x658aaef7 // bfcvt z23.h, p3/M, z23.s\n" + ".inst 0x658aaed6 // bfcvt z22.h, p3/M, z22.s\n" + "inch x28, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x26]\n" + ".inst 0x658aac35 // bfcvt z21.h, p3/M, z1.s\n" + ".inst 0x658aac14 // bfcvt z20.h, p3/M, z0.s\n" + "inch x27, ALL, MUL #3\n" + "st1h { z17.s }, p1, [x26, #1, MUL VL]\n" + ".inst 0x658aaff3 // bfcvt z19.h, p3/M, z31.s\n" + ".inst 0x658aafd2 // bfcvt z18.h, p3/M, z30.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z16.s }, p0, [x26, #2, MUL VL]\n" + ".inst 0x658aafb1 // bfcvt z17.h, p3/M, z29.s\n" + ".inst 0x658aaf90 // bfcvt z16.h, p3/M, z28.s\n" + "inch x26, ALL, MUL #3\n" + "st1h { z27.s }, p2, [x25]\n" + "st1h { z26.s }, p1, [x25, #1, MUL VL]\n" + "st1h { z25.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "st1h { z24.s }, p2, [x24]\n" + "st1h { z23.s }, p1, [x24, #1, MUL VL]\n" + "st1h { z22.s }, p0, [x24, #2, MUL VL]\n" + "inch x24, ALL, MUL #3\n" + "st1h { z21.s }, p2, [x23]\n" + "st1h { z20.s }, 
p1, [x23, #1, MUL VL]\n" + "st1h { z19.s }, p0, [x23, #2, MUL VL]\n" + "inch x23, ALL, MUL #3\n" + "st1h { z18.s }, p2, [x22]\n" + "st1h { z17.s }, p1, [x22, #1, MUL VL]\n" + "st1h { z16.s }, p0, [x22, #2, MUL VL]\n" + "inch x22, ALL, MUL #3\n" + "bgt 31b\n" + "subs %x[rows], %x[rows], #0x8\n" + "add %x[out_ptr], %x[out_ptr], x12\n" + "bgt 1b\n" + "b 52f\n" + "34:" // Accumulate + "35:" // Accumulate: Row loop + "cmp %x[rows], #0x7\n" + "bgt 50f\n" + "beq 48f\n" + "cmp %x[rows], #0x5\n" + "bgt 46f\n" + "beq 44f\n" + "cmp %x[rows], #0x3\n" + "bgt 42f\n" + "beq 40f\n" + "cmp %x[rows], #0x1\n" + "bgt 38f\n" + "36:" // Accumulate: Height 1 + "mov x11, %x[cols]\n" + "mov x10, %x[out_ptr]\n" + "37:" // Accumulate: Height 1: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z16.s }, p2/Z, [x10]\n" + "ld1w { z19.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z16.s, z16.s, #0x10\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "fadd z19.s, z19.s, z16.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "ld1w { z18.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "whilelt p0.s, x21, x11\n" + "decw x11, ALL, MUL #3\n" + "incw x21\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "ld1w { z17.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "cmp x11, XZR\n" + ".inst 0x658aae70 // bfcvt z16.h, p3/M, z19.s\n" + "st1h { z16.s }, p2, [x10]\n" + "ld1h { z16.s }, p1/Z, [x10, #1, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z18.s, z18.s, z16.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + ".inst 0x658aae50 // bfcvt z16.h, p3/M, z18.s\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x10, #2, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z17.s, z17.s, z16.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + ".inst 0x658aae30 // bfcvt z16.h, p3/M, z17.s\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + "bgt 37b\n" + "b 52f\n" + "38:" // Accumulate: Height 2 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "39:" // Accumulate: Height 2: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z17.s }, p2/Z, [x10]\n" + "ld1h { z16.s }, p2/Z, [x28]\n" + "ld1w { z23.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z17.s, z17.s, #0x10\n" + "ld1w { z22.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "fadd z23.s, z23.s, z17.s\n" + "fadd z22.s, z22.s, z16.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "ld1w { z21.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z20.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "whilelt p0.s, x21, x11\n" + "decw x11, ALL, MUL #3\n" + "incw x21\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + "ld1w { z19.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z18.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "cmp x11, XZR\n" + ".inst 0x658aaef1 // bfcvt z17.h, p3/M, z23.s\n" + ".inst 0x658aaed0 // bfcvt z16.h, p3/M, z22.s\n" + "st1h { z17.s }, p2, [x10]\n" + "st1h { z16.s }, p2, [x28]\n" + "ld1h { z17.s }, p1/Z, [x10, #1, MUL VL]\n" + "ld1h { z16.s }, p1/Z, [x28, #1, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z21.s, z21.s, z17.s\n" + "fadd z20.s, z20.s, z16.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + 
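+ // Activation clamp: fmin against the broadcast maxval (z12), then fmax against the broadcast minval (z11).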
"fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + ".inst 0x658aaeb0 // bfcvt z16.h, p3/M, z21.s\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aae90 // bfcvt z16.h, p3/M, z20.s\n" + "ld1h { z17.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z16.s }, p1, [x28, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x28, #2, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z19.s, z19.s, z17.s\n" + "fadd z18.s, z18.s, z16.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + ".inst 0x658aae70 // bfcvt z16.h, p3/M, z19.s\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + ".inst 0x658aae50 // bfcvt z16.h, p3/M, z18.s\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + "bgt 39b\n" + "b 52f\n" + "40:" // Accumulate: Height 3 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "41:" // Accumulate: Height 3: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z18.s }, p2/Z, [x10]\n" + "ld1h { z17.s }, p2/Z, [x28]\n" + "ld1h { z16.s }, p2/Z, [x27]\n" + "ld1w { z26.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z19.s, z18.s, #0x10\n" + "ld1w { z25.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "ld1w { z18.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z26.s, z26.s, z19.s\n" + "fadd z25.s, z25.s, z17.s\n" + "ld1w { z24.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z23.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z22.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "fadd z18.s, z18.s, z16.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "whilelt p0.s, x21, x11\n" + "decw x11, ALL, MUL #3\n" + "incw x21\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "ld1w { z21.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z20.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "ld1w { z19.s }, p0/Z, [x20, #-8, MUL VL]\n" + "cmp x11, XZR\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + ".inst 0x658aaf51 // bfcvt z17.h, p3/M, z26.s\n" + ".inst 0x658aaf30 // bfcvt z16.h, p3/M, z25.s\n" + "st1h { z17.s }, p2, [x10]\n" + "st1h { z16.s }, p2, [x28]\n" + ".inst 0x658aae51 // bfcvt z17.h, p3/M, z18.s\n" + "ld1h { z16.s }, p1/Z, [x10, #1, MUL VL]\n" + "st1h { z17.s }, p2, [x27]\n" + "ld1h { z17.s }, p1/Z, [x28, #1, MUL VL]\n" + "lsl z18.s, z16.s, #0x10\n" + "ld1h { z16.s }, p1/Z, [x27, #1, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z24.s, z24.s, z18.s\n" + "fadd z23.s, z23.s, z17.s\n" + "fadd z22.s, z22.s, z16.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aaf10 // bfcvt z16.h, p3/M, z24.s\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aaef2 // bfcvt z18.h, p3/M, z23.s\n" + ".inst 0x658aaed1 // bfcvt z17.h, p3/M, z22.s\n" + "ld1h { z16.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z18.s }, p1, [x28, #1, MUL VL]\n" + "st1h { z17.s }, p1, [x27, #1, MUL VL]\n" + "ld1h { z17.s }, p0/Z, [x28, #2, MUL VL]\n" + "lsl z18.s, z16.s, #0x10\n" + "ld1h { z16.s }, 
p0/Z, [x27, #2, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z21.s, z21.s, z18.s\n" + "fadd z20.s, z20.s, z17.s\n" + "fadd z19.s, z19.s, z16.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + ".inst 0x658aaeb0 // bfcvt z16.h, p3/M, z21.s\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + ".inst 0x658aae91 // bfcvt z17.h, p3/M, z20.s\n" + ".inst 0x658aae70 // bfcvt z16.h, p3/M, z19.s\n" + "st1h { z17.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "bgt 41b\n" + "b 52f\n" + "42:" // Accumulate: Height 4 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "43:" // Accumulate: Height 4: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z19.s }, p2/Z, [x10]\n" + "ld1h { z18.s }, p2/Z, [x28]\n" + "ld1h { z17.s }, p2/Z, [x27]\n" + "ld1h { z16.s }, p2/Z, [x26]\n" + "ld1w { z30.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z20.s, z19.s, #0x10\n" + "ld1w { z29.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "lsl z18.s, z18.s, #0x10\n" + "ld1w { z28.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z19.s }, p2/Z, [x20, #-7, MUL VL]\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z30.s, z30.s, z20.s\n" + "fadd z29.s, z29.s, z18.s\n" + "ld1w { z27.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z26.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "ld1w { z25.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "ld1w { z24.s }, p1/Z, [x20, #-6, MUL VL]\n" + "whilelt p0.s, x21, x11\n" + "decw x11, ALL, MUL #3\n" + "fadd z28.s, z28.s, z17.s\n" + "fadd z19.s, z19.s, z16.s\n" + "incw x21\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "ld1w { z23.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z22.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "ld1w { z21.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z20.s }, p0/Z, [x20, #-5, MUL VL]\n" + "cmp x11, XZR\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z19.s, p3/M, z19.s, z12.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "fmax z19.s, p3/M, z19.s, z11.s\n" + ".inst 0x658aafd2 // bfcvt z18.h, p3/M, z30.s\n" + ".inst 0x658aafb1 // bfcvt z17.h, p3/M, z29.s\n" + ".inst 0x658aaf90 // bfcvt z16.h, p3/M, z28.s\n" + "st1h { z18.s }, p2, [x10]\n" + "st1h { z17.s }, p2, [x28]\n" + ".inst 0x658aae71 // bfcvt z17.h, p3/M, z19.s\n" + "st1h { z16.s }, p2, [x27]\n" + "ld1h { z16.s }, p1/Z, [x10, #1, MUL VL]\n" + "st1h { z17.s }, p2, [x26]\n" + "ld1h { z18.s }, p1/Z, [x28, #1, MUL VL]\n" + "ld1h { z17.s }, p1/Z, [x27, #1, MUL VL]\n" + "lsl z19.s, z16.s, #0x10\n" + "ld1h { z16.s }, p1/Z, [x26, #1, MUL VL]\n" + "lsl z18.s, z18.s, #0x10\n" + "lsl z17.s, z17.s, #0x10\n" + "fadd z27.s, z27.s, z19.s\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z26.s, z26.s, z18.s\n" + "fadd z25.s, z25.s, z17.s\n" + "fadd z24.s, z24.s, z16.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmax z26.s, p3/M, 
z26.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + ".inst 0x658aaf71 // bfcvt z17.h, p3/M, z27.s\n" + ".inst 0x658aaf50 // bfcvt z16.h, p3/M, z26.s\n" + "st1h { z17.s }, p1, [x10, #1, MUL VL]\n" + "st1h { z16.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aaf32 // bfcvt z18.h, p3/M, z25.s\n" + ".inst 0x658aaf11 // bfcvt z17.h, p3/M, z24.s\n" + "ld1h { z16.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z18.s }, p1, [x27, #1, MUL VL]\n" + "st1h { z17.s }, p1, [x26, #1, MUL VL]\n" + "ld1h { z18.s }, p0/Z, [x28, #2, MUL VL]\n" + "lsl z19.s, z16.s, #0x10\n" + "ld1h { z17.s }, p0/Z, [x27, #2, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x26, #2, MUL VL]\n" + "lsl z18.s, z18.s, #0x10\n" + "lsl z17.s, z17.s, #0x10\n" + "fadd z23.s, z23.s, z19.s\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z22.s, z22.s, z18.s\n" + "fadd z21.s, z21.s, z17.s\n" + "fadd z20.s, z20.s, z16.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + ".inst 0x658aaef1 // bfcvt z17.h, p3/M, z23.s\n" + ".inst 0x658aaed0 // bfcvt z16.h, p3/M, z22.s\n" + "st1h { z17.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + ".inst 0x658aaeb1 // bfcvt z17.h, p3/M, z21.s\n" + ".inst 0x658aae90 // bfcvt z16.h, p3/M, z20.s\n" + "st1h { z17.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "bgt 43b\n" + "b 52f\n" + "44:" // Accumulate: Height 5 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "45:" // Accumulate: Height 5: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z20.s }, p2/Z, [x10]\n" + "ld1h { z19.s }, p2/Z, [x28]\n" + "ld1h { z18.s }, p2/Z, [x27]\n" + "ld1h { z17.s }, p2/Z, [x26]\n" + "ld1h { z16.s }, p2/Z, [x25]\n" + "ld1w { z1.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z22.s, z20.s, #0x10\n" + "ld1w { z0.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "lsl z21.s, z19.s, #0x10\n" + "ld1w { z31.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "whilelt p1.s, x21, x11\n" + "lsl z19.s, z18.s, #0x10\n" + "ld1w { z20.s }, p2/Z, [x20, #-7, MUL VL]\n" + "lsl z18.s, z17.s, #0x10\n" + "ld1w { z17.s }, p2/Z, [x20, #-4, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z1.s, z1.s, z22.s\n" + "incw x21\n" + "fadd z0.s, z0.s, z21.s\n" + "ld1w { z30.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z29.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "fadd z31.s, z31.s, z19.s\n" + "fadd z20.s, z20.s, z18.s\n" + "ld1w { z28.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "ld1w { z27.s }, p1/Z, [x20, #-6, MUL VL]\n" + "fadd z17.s, z17.s, z16.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "ld1w { z26.s }, p1/Z, [x20, #-3, MUL VL]\n" + "whilelt p0.s, x21, x11\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmin z20.s, p3/M, z20.s, z12.s\n" + "fmin z17.s, p3/M, z17.s, z12.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + "ld1w { z25.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z24.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + "ld1w 
{ z23.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z22.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fmax z20.s, p3/M, z20.s, z11.s\n" + "fmax z17.s, p3/M, z17.s, z11.s\n" + "ld1w { z21.s }, p0/Z, [x20, #-2, MUL VL]\n" + ".inst 0x658aac30 // bfcvt z16.h, p3/M, z1.s\n" + "cmp x11, XZR\n" + "incw x21\n" + ".inst 0x658aac13 // bfcvt z19.h, p3/M, z0.s\n" + ".inst 0x658aaff2 // bfcvt z18.h, p3/M, z31.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z16.s }, p2, [x10]\n" + ".inst 0x658aae90 // bfcvt z16.h, p3/M, z20.s\n" + ".inst 0x658aae31 // bfcvt z17.h, p3/M, z17.s\n" + "st1h { z19.s }, p2, [x28]\n" + "st1h { z18.s }, p2, [x27]\n" + "st1h { z16.s }, p2, [x26]\n" + "ld1h { z16.s }, p1/Z, [x10, #1, MUL VL]\n" + "st1h { z17.s }, p2, [x25]\n" + "ld1h { z19.s }, p1/Z, [x28, #1, MUL VL]\n" + "ld1h { z18.s }, p1/Z, [x27, #1, MUL VL]\n" + "ld1h { z17.s }, p1/Z, [x26, #1, MUL VL]\n" + "lsl z20.s, z16.s, #0x10\n" + "ld1h { z16.s }, p1/Z, [x25, #1, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z30.s, z30.s, z20.s\n" + "fadd z29.s, z29.s, z19.s\n" + "fadd z28.s, z28.s, z18.s\n" + "fadd z27.s, z27.s, z17.s\n" + "fadd z26.s, z26.s, z16.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + ".inst 0x658aafd2 // bfcvt z18.h, p3/M, z30.s\n" + ".inst 0x658aafb1 // bfcvt z17.h, p3/M, z29.s\n" + ".inst 0x658aaf90 // bfcvt z16.h, p3/M, z28.s\n" + "st1h { z18.s }, p1, [x10, #1, MUL VL]\n" + "st1h { z17.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aaf72 // bfcvt z18.h, p3/M, z27.s\n" + ".inst 0x658aaf51 // bfcvt z17.h, p3/M, z26.s\n" + "st1h { z16.s }, p1, [x27, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z18.s }, p1, [x26, #1, MUL VL]\n" + "st1h { z17.s }, p1, [x25, #1, MUL VL]\n" + "ld1h { z19.s }, p0/Z, [x28, #2, MUL VL]\n" + "ld1h { z18.s }, p0/Z, [x27, #2, MUL VL]\n" + "lsl z20.s, z16.s, #0x10\n" + "ld1h { z17.s }, p0/Z, [x26, #2, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x25, #2, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z25.s, z25.s, z20.s\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z24.s, z24.s, z19.s\n" + "fadd z23.s, z23.s, z18.s\n" + "fadd z22.s, z22.s, z17.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fadd z21.s, z21.s, z16.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aaf31 // bfcvt z17.h, p3/M, z25.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + ".inst 0x658aaf10 // bfcvt z16.h, p3/M, z24.s\n" + "st1h { z17.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + ".inst 0x658aaef2 // bfcvt z18.h, p3/M, z23.s\n" + ".inst 0x658aaed1 // bfcvt z17.h, p3/M, z22.s\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + ".inst 0x658aaeb0 // bfcvt z16.h, p3/M, z21.s\n" + "st1h { z18.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "st1h { z17.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x25, #2, 
MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "bgt 45b\n" + "b 52f\n" + "46:" // Accumulate: Height 6 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "47:" // Accumulate: Height 6: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z21.s }, p2/Z, [x10]\n" + "ld1h { z20.s }, p2/Z, [x28]\n" + "ld1h { z19.s }, p2/Z, [x27]\n" + "ld1h { z18.s }, p2/Z, [x26]\n" + "ld1h { z17.s }, p2/Z, [x25]\n" + "ld1h { z16.s }, p2/Z, [x24]\n" + "ld1w { z6.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z22.s, z21.s, #0x10\n" + "ld1w { z5.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "lsl z21.s, z20.s, #0x10\n" + "ld1w { z4.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "lsl z20.s, z19.s, #0x10\n" + "ld1w { z3.s }, p2/Z, [x20, #-7, MUL VL]\n" + "lsl z19.s, z18.s, #0x10\n" + "ld1w { z2.s }, p2/Z, [x20, #-4, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "ld1w { z18.s }, p2/Z, [x20, #-1, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z6.s, z6.s, z22.s\n" + "fadd z5.s, z5.s, z21.s\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "fadd z4.s, z4.s, z20.s\n" + "fadd z3.s, z3.s, z19.s\n" + "fadd z2.s, z2.s, z17.s\n" + "fadd z18.s, z18.s, z16.s\n" + "fmin z6.s, p3/M, z6.s, z12.s\n" + "fmin z5.s, p3/M, z5.s, z12.s\n" + "ld1w { z1.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z0.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "whilelt p0.s, x21, x11\n" + "decw x11, ALL, MUL #3\n" + "fmin z4.s, p3/M, z4.s, z12.s\n" + "fmin z3.s, p3/M, z3.s, z12.s\n" + "ld1w { z31.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "ld1w { z30.s }, p1/Z, [x20, #-6, MUL VL]\n" + "fmin z2.s, p3/M, z2.s, z12.s\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "ld1w { z29.s }, p1/Z, [x20, #-3, MUL VL]\n" + "ld1w { z28.s }, p1/Z, [x20]\n" + "fmax z6.s, p3/M, z6.s, z11.s\n" + "fmax z5.s, p3/M, z5.s, z11.s\n" + "ld1w { z27.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z26.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "fmax z4.s, p3/M, z4.s, z11.s\n" + "fmax z3.s, p3/M, z3.s, z11.s\n" + "ld1w { z25.s }, p0/Z, [x20, #-8, MUL VL]\n" + "ld1w { z24.s }, p0/Z, [x20, #-5, MUL VL]\n" + "fmax z2.s, p3/M, z2.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "ld1w { z23.s }, p0/Z, [x20, #-2, MUL VL]\n" + "ld1w { z22.s }, p0/Z, [x20, #1, MUL VL]\n" + ".inst 0x658aacd5 // bfcvt z21.h, p3/M, z6.s\n" + ".inst 0x658aacb4 // bfcvt z20.h, p3/M, z5.s\n" + "cmp x11, XZR\n" + "incw x21\n" + ".inst 0x658aac93 // bfcvt z19.h, p3/M, z4.s\n" + ".inst 0x658aac71 // bfcvt z17.h, p3/M, z3.s\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + ".inst 0x658aac50 // bfcvt z16.h, p3/M, z2.s\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + "st1h { z21.s }, p2, [x10]\n" + "st1h { z20.s }, p2, [x28]\n" + "st1h { z19.s }, p2, [x27]\n" + "st1h { z17.s }, p2, [x26]\n" + "ld1h { z17.s }, p1/Z, [x10, #1, MUL VL]\n" + "st1h { z16.s }, p2, [x25]\n" + "ld1h { z16.s }, p1/Z, [x28, #1, MUL VL]\n" + "st1h { z18.s }, p2, [x24]\n" + "ld1h { z19.s }, p1/Z, [x27, #1, MUL VL]\n" + "ld1h { z18.s }, p1/Z, [x26, #1, MUL VL]\n" + "lsl z21.s, z17.s, #0x10\n" + "ld1h { z17.s }, p1/Z, [x25, #1, MUL VL]\n" + "lsl z20.s, z16.s, #0x10\n" + "ld1h { z16.s }, p1/Z, [x24, #1, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z1.s, z1.s, z21.s\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z0.s, z0.s, z20.s\n" + "fadd z31.s, z31.s, z19.s\n" + "fadd z30.s, 
z30.s, z18.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "fadd z29.s, z29.s, z17.s\n" + "fadd z28.s, z28.s, z16.s\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + ".inst 0x658aac34 // bfcvt z20.h, p3/M, z1.s\n" + ".inst 0x658aac12 // bfcvt z18.h, p3/M, z0.s\n" + ".inst 0x658aaff3 // bfcvt z19.h, p3/M, z31.s\n" + ".inst 0x658aafd1 // bfcvt z17.h, p3/M, z30.s\n" + ".inst 0x658aafb0 // bfcvt z16.h, p3/M, z29.s\n" + "st1h { z20.s }, p1, [x10, #1, MUL VL]\n" + "st1h { z18.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aaf92 // bfcvt z18.h, p3/M, z28.s\n" + "st1h { z19.s }, p1, [x27, #1, MUL VL]\n" + "st1h { z17.s }, p1, [x26, #1, MUL VL]\n" + "ld1h { z17.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z16.s }, p1, [x25, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x28, #2, MUL VL]\n" + "st1h { z18.s }, p1, [x24, #1, MUL VL]\n" + "ld1h { z19.s }, p0/Z, [x27, #2, MUL VL]\n" + "ld1h { z18.s }, p0/Z, [x26, #2, MUL VL]\n" + "lsl z21.s, z17.s, #0x10\n" + "ld1h { z17.s }, p0/Z, [x25, #2, MUL VL]\n" + "lsl z20.s, z16.s, #0x10\n" + "ld1h { z16.s }, p0/Z, [x24, #2, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z27.s, z27.s, z21.s\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z26.s, z26.s, z20.s\n" + "fadd z25.s, z25.s, z19.s\n" + "fadd z24.s, z24.s, z18.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fadd z23.s, z23.s, z17.s\n" + "fadd z22.s, z22.s, z16.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aaf74 // bfcvt z20.h, p3/M, z27.s\n" + ".inst 0x658aaf50 // bfcvt z16.h, p3/M, z26.s\n" + ".inst 0x658aaf33 // bfcvt z19.h, p3/M, z25.s\n" + ".inst 0x658aaf12 // bfcvt z18.h, p3/M, z24.s\n" + ".inst 0x658aaef1 // bfcvt z17.h, p3/M, z23.s\n" + "st1h { z20.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x28, #2, MUL VL]\n" + ".inst 0x658aaed0 // bfcvt z16.h, p3/M, z22.s\n" + "inch x28, ALL, MUL #3\n" + "st1h { z19.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + "st1h { z18.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z17.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x24, #2, MUL VL]\n" + "inch x24, ALL, MUL #3\n" + "bgt 47b\n" + "b 52f\n" + "48:" // Accumulate: Height 7 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "add x23, x24, %x[ldout], LSL #1\n" + "49:" // Accumulate: Height 7: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z22.s }, p2/Z, [x10]\n" + "ld1h { z21.s }, p2/Z, [x28]\n" + "ld1h { z20.s }, p2/Z, [x27]\n" + "ld1h { z19.s }, p2/Z, [x26]\n" + "ld1h { z18.s }, p2/Z, [x25]\n" 
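+ // Accumulate path: the current bf16 outputs are reloaded (ld1h) and widened to fp32 by a 16-bit left shift before being added in.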
+ "ld1h { z17.s }, p2/Z, [x24]\n" + "ld1h { z16.s }, p2/Z, [x23]\n" + "ld1w { z8.s }, p2/Z, [%x[in_ptr]]\n" + "lsl z25.s, z22.s, #0x10\n" + "lsl z24.s, z21.s, #0x10\n" + "ld1w { z21.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "ld1w { z7.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "lsl z20.s, z20.s, #0x10\n" + "lsl z19.s, z19.s, #0x10\n" + "ld1w { z23.s }, p2/Z, [x20, #-7, MUL VL]\n" + "ld1w { z6.s }, p2/Z, [x20, #-4, MUL VL]\n" + "lsl z18.s, z18.s, #0x10\n" + "lsl z17.s, z17.s, #0x10\n" + "ld1w { z5.s }, p2/Z, [x20, #-1, MUL VL]\n" + "ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z8.s, z8.s, z25.s\n" + "fadd z21.s, z21.s, z24.s\n" + "fadd z7.s, z7.s, z20.s\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "fadd z23.s, z23.s, z19.s\n" + "fadd z6.s, z6.s, z18.s\n" + "fadd z5.s, z5.s, z17.s\n" + "fadd z22.s, z22.s, z16.s\n" + "fmin z8.s, p3/M, z8.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z7.s, p3/M, z7.s, z12.s\n" + "ld1w { z4.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z3.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "whilelt p0.s, x21, x11\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z6.s, p3/M, z6.s, z12.s\n" + "ld1w { z2.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "ld1w { z1.s }, p1/Z, [x20, #-6, MUL VL]\n" + "fmin z5.s, p3/M, z5.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "ld1w { z0.s }, p1/Z, [x20, #-3, MUL VL]\n" + "ld1w { z31.s }, p1/Z, [x20]\n" + "fmax z8.s, p3/M, z8.s, z11.s\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "ld1w { z30.s }, p1/Z, [x20, #3, MUL VL]\n" + "ld1w { z29.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "fmax z7.s, p3/M, z7.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "ld1w { z28.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "ld1w { z27.s }, p0/Z, [x20, #-8, MUL VL]\n" + "fmax z6.s, p3/M, z6.s, z11.s\n" + "fmax z5.s, p3/M, z5.s, z11.s\n" + "ld1w { z26.s }, p0/Z, [x20, #-5, MUL VL]\n" + "ld1w { z25.s }, p0/Z, [x20, #-2, MUL VL]\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aad13 // bfcvt z19.h, p3/M, z8.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + "ld1w { z24.s }, p0/Z, [x20, #1, MUL VL]\n" + ".inst 0x658aacf4 // bfcvt z20.h, p3/M, z7.s\n" + ".inst 0x658aaef2 // bfcvt z18.h, p3/M, z23.s\n" + "ld1w { z23.s }, p0/Z, [x20, #4, MUL VL]\n" + "decw x11, ALL, MUL #3\n" + ".inst 0x658aacd1 // bfcvt z17.h, p3/M, z6.s\n" + ".inst 0x658aacb0 // bfcvt z16.h, p3/M, z5.s\n" + "incw x21\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z19.s }, p2, [x10]\n" + ".inst 0x658aaed3 // bfcvt z19.h, p3/M, z22.s\n" + "st1h { z21.s }, p2, [x28]\n" + "cmp x11, XZR\n" + "st1h { z20.s }, p2, [x27]\n" + "st1h { z18.s }, p2, [x26]\n" + "ld1h { z18.s }, p1/Z, [x10, #1, MUL VL]\n" + "st1h { z17.s }, p2, [x25]\n" + "ld1h { z17.s }, p1/Z, [x28, #1, MUL VL]\n" + "st1h { z16.s }, p2, [x24]\n" + "ld1h { z16.s }, p1/Z, [x27, #1, MUL VL]\n" + "st1h { z19.s }, p2, [x23]\n" + "ld1h { z19.s }, p1/Z, [x26, #1, MUL VL]\n" + "lsl z22.s, z18.s, #0x10\n" + "ld1h { z18.s }, p1/Z, [x25, #1, MUL VL]\n" + "lsl z21.s, z17.s, #0x10\n" + "ld1h { z17.s }, p1/Z, [x24, #1, MUL VL]\n" + "lsl z20.s, z16.s, #0x10\n" + "ld1h { z16.s }, p1/Z, [x23, #1, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z4.s, z4.s, z22.s\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z3.s, z3.s, z21.s\n" + "fadd z2.s, z2.s, z20.s\n" + "fadd z1.s, z1.s, z19.s\n" + "fadd z0.s, z0.s, z18.s\n" + "fadd z31.s, z31.s, z17.s\n" + "fmin z4.s, p3/M, z4.s, z12.s\n" + "fadd z30.s, z30.s, z16.s\n" + "fmin z3.s, p3/M, z3.s, z12.s\n" + "fmin 
z2.s, p3/M, z2.s, z12.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmax z4.s, p3/M, z4.s, z11.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fmax z3.s, p3/M, z3.s, z11.s\n" + "fmax z2.s, p3/M, z2.s, z11.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + ".inst 0x658aac90 // bfcvt z16.h, p3/M, z4.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + ".inst 0x658aac74 // bfcvt z20.h, p3/M, z3.s\n" + ".inst 0x658aac53 // bfcvt z19.h, p3/M, z2.s\n" + ".inst 0x658aac32 // bfcvt z18.h, p3/M, z1.s\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aac11 // bfcvt z17.h, p3/M, z0.s\n" + ".inst 0x658aaff0 // bfcvt z16.h, p3/M, z31.s\n" + "st1h { z20.s }, p1, [x28, #1, MUL VL]\n" + "st1h { z19.s }, p1, [x27, #1, MUL VL]\n" + ".inst 0x658aafd3 // bfcvt z19.h, p3/M, z30.s\n" + "st1h { z18.s }, p1, [x26, #1, MUL VL]\n" + "ld1h { z18.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z17.s }, p1, [x25, #1, MUL VL]\n" + "ld1h { z17.s }, p0/Z, [x28, #2, MUL VL]\n" + "st1h { z16.s }, p1, [x24, #1, MUL VL]\n" + "ld1h { z16.s }, p0/Z, [x27, #2, MUL VL]\n" + "st1h { z19.s }, p1, [x23, #1, MUL VL]\n" + "ld1h { z19.s }, p0/Z, [x26, #2, MUL VL]\n" + "lsl z22.s, z18.s, #0x10\n" + "ld1h { z18.s }, p0/Z, [x25, #2, MUL VL]\n" + "lsl z21.s, z17.s, #0x10\n" + "ld1h { z17.s }, p0/Z, [x24, #2, MUL VL]\n" + "lsl z20.s, z16.s, #0x10\n" + "ld1h { z16.s }, p0/Z, [x23, #2, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z29.s, z29.s, z22.s\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z28.s, z28.s, z21.s\n" + "fadd z27.s, z27.s, z20.s\n" + "fadd z26.s, z26.s, z19.s\n" + "fadd z25.s, z25.s, z18.s\n" + "fadd z24.s, z24.s, z17.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fadd z23.s, z23.s, z16.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + ".inst 0x658aafb1 // bfcvt z17.h, p3/M, z29.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + ".inst 0x658aaf94 // bfcvt z20.h, p3/M, z28.s\n" + ".inst 0x658aaf70 // bfcvt z16.h, p3/M, z27.s\n" + ".inst 0x658aaf53 // bfcvt z19.h, p3/M, z26.s\n" + "st1h { z17.s }, p0, [x10, #2, MUL VL]\n" + "inch x10, ALL, MUL #3\n" + ".inst 0x658aaf32 // bfcvt z18.h, p3/M, z25.s\n" + ".inst 0x658aaf11 // bfcvt z17.h, p3/M, z24.s\n" + "st1h { z20.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x27, #2, MUL VL]\n" + ".inst 0x658aaef0 // bfcvt z16.h, p3/M, z23.s\n" + "inch x27, ALL, MUL #3\n" + "st1h { z19.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z18.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "st1h { z17.s }, p0, [x24, #2, MUL VL]\n" + "inch x24, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x23, #2, MUL VL]\n" + "inch x23, ALL, MUL #3\n" + "bgt 49b\n" + "b 52f\n" + "50:" // Accumulate: Height 8 + "mov x10, %x[out_ptr]\n" + "mov x11, %x[cols]\n" + "add x28, x10, %x[ldout], LSL #1\n" + "add x27, x28, %x[ldout], LSL #1\n" + "add x26, x27, %x[ldout], LSL #1\n" + "add x25, x26, %x[ldout], LSL #1\n" + "add x24, x25, %x[ldout], LSL #1\n" + "add x23, x24, %x[ldout], LSL #1\n" + "add 
x22, x23, %x[ldout], LSL #1\n" + "51:" // Accumulate: Height 8: Block loop + "mov x21, #0x0\n" + "addvl x20, %x[in_ptr], #16\n" + "whilelt p2.s, x21, x11\n" + "incw x21\n" + "ld1h { z23.s }, p2/Z, [x10]\n" + "ld1h { z22.s }, p2/Z, [x28]\n" + "ld1h { z21.s }, p2/Z, [x27]\n" + "ld1h { z20.s }, p2/Z, [x26]\n" + "ld1h { z19.s }, p2/Z, [x25]\n" + "ld1h { z18.s }, p2/Z, [x24]\n" + "ld1h { z17.s }, p2/Z, [x23]\n" + "ld1h { z16.s }, p2/Z, [x22]\n" + "lsl z31.s, z23.s, #0x10\n" + "lsl z30.s, z22.s, #0x10\n" + "ld1w { z29.s }, p2/Z, [%x[in_ptr]]\n" + "ld1w { z28.s }, p2/Z, [%x[in_ptr], #3, MUL VL]\n" + "lsl z27.s, z21.s, #0x10\n" + "lsl z26.s, z20.s, #0x10\n" + "ld1w { z21.s }, p2/Z, [%x[in_ptr], #6, MUL VL]\n" + "ld1w { z25.s }, p2/Z, [x20, #-7, MUL VL]\n" + "lsl z20.s, z19.s, #0x10\n" + "lsl z19.s, z18.s, #0x10\n" + "ld1w { z18.s }, p2/Z, [x20, #-4, MUL VL]\n" + "ld1w { z24.s }, p2/Z, [x20, #-1, MUL VL]\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "ld1w { z23.s }, p2/Z, [x20, #2, MUL VL]\n" + "ld1w { z22.s }, p2/Z, [x20, #5, MUL VL]\n" + "fadd z29.s, z29.s, z31.s\n" + "fadd z28.s, z28.s, z30.s\n" + "fadd z21.s, z21.s, z27.s\n" + "fadd z25.s, z25.s, z26.s\n" + "whilelt p1.s, x21, x11\n" + "incw x21\n" + "fadd z18.s, z18.s, z20.s\n" + "fadd z24.s, z24.s, z19.s\n" + "fadd z23.s, z23.s, z17.s\n" + "fadd z22.s, z22.s, z16.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fmin z21.s, p3/M, z21.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "ld1w { z6.s }, p1/Z, [%x[in_ptr], #1, MUL VL]\n" + "ld1w { z5.s }, p1/Z, [%x[in_ptr], #4, MUL VL]\n" + "fmin z18.s, p3/M, z18.s, z12.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "ld1w { z4.s }, p1/Z, [%x[in_ptr], #7, MUL VL]\n" + "ld1w { z3.s }, p1/Z, [x20, #-6, MUL VL]\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmin z22.s, p3/M, z22.s, z12.s\n" + "ld1w { z2.s }, p1/Z, [x20, #-3, MUL VL]\n" + "ld1w { z1.s }, p1/Z, [x20]\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n" + "ld1w { z31.s }, p1/Z, [x20, #6, MUL VL]\n" + "fmax z21.s, p3/M, z21.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + "fmax z18.s, p3/M, z18.s, z11.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "fmax z22.s, p3/M, z22.s, z11.s\n" + ".inst 0x658aafb4 // bfcvt z20.h, p3/M, z29.s\n" + ".inst 0x658aaf93 // bfcvt z19.h, p3/M, z28.s\n" + ".inst 0x658aaeb5 // bfcvt z21.h, p3/M, z21.s\n" + ".inst 0x658aaf30 // bfcvt z16.h, p3/M, z25.s\n" + "whilelt p0.s, x21, x11\n" + "decw x11, ALL, MUL #3\n" + ".inst 0x658aae52 // bfcvt z18.h, p3/M, z18.s\n" + ".inst 0x658aaf11 // bfcvt z17.h, p3/M, z24.s\n" + "incw x21\n" + "st1h { z20.s }, p2, [x10]\n" + "st1h { z19.s }, p2, [x28]\n" + ".inst 0x658aaef4 // bfcvt z20.h, p3/M, z23.s\n" + ".inst 0x658aaed3 // bfcvt z19.h, p3/M, z22.s\n" + "st1h { z21.s }, p2, [x27]\n" + "ld1w { z30.s }, p0/Z, [%x[in_ptr], #2, MUL VL]\n" + "ld1w { z29.s }, p0/Z, [%x[in_ptr], #5, MUL VL]\n" + "cmp x11, XZR\n" + "st1h { z16.s }, p2, [x26]\n" + "ld1h { z16.s }, p1/Z, [x10, #1, MUL VL]\n" + "ld1w { z28.s }, p0/Z, [x20, #-8, MUL VL]\n" + "addvl %x[in_ptr], %x[in_ptr], #24\n" + "st1h { z18.s }, p2, [x25]\n" + "ld1h { z18.s }, p1/Z, [x28, #1, MUL VL]\n" + "ld1w { z27.s }, p0/Z, [x20, #-5, MUL VL]\n" + "st1h { z17.s }, p2, [x24]\n" + "ld1h { z17.s }, p1/Z, [x27, #1, MUL VL]\n" + "ld1w { z26.s }, p0/Z, [x20, #-2, MUL VL]\n" + "st1h { z20.s }, p2, [x23]\n" + "ld1h { z20.s }, p1/Z, [x26, #1, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "ld1w { 
z25.s }, p0/Z, [x20, #1, MUL VL]\n" + "st1h { z19.s }, p2, [x22]\n" + "ld1h { z19.s }, p1/Z, [x25, #1, MUL VL]\n" + "lsl z22.s, z18.s, #0x10\n" + "ld1w { z24.s }, p0/Z, [x20, #4, MUL VL]\n" + "ld1h { z18.s }, p1/Z, [x24, #1, MUL VL]\n" + "lsl z21.s, z17.s, #0x10\n" + "ld1w { z23.s }, p0/Z, [x20, #7, MUL VL]\n" + "ld1h { z17.s }, p1/Z, [x23, #1, MUL VL]\n" + "lsl z20.s, z20.s, #0x10\n" + "fadd z6.s, z6.s, z16.s\n" + "ld1h { z16.s }, p1/Z, [x22, #1, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "fadd z5.s, z5.s, z22.s\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z4.s, z4.s, z21.s\n" + "lsl z17.s, z17.s, #0x10\n" + "lsl z16.s, z16.s, #0x10\n" + "fmin z6.s, p3/M, z6.s, z12.s\n" + "fadd z3.s, z3.s, z20.s\n" + "fadd z2.s, z2.s, z19.s\n" + "fmin z5.s, p3/M, z5.s, z12.s\n" + "fadd z1.s, z1.s, z18.s\n" + "fmin z4.s, p3/M, z4.s, z12.s\n" + "fadd z0.s, z0.s, z17.s\n" + "fadd z31.s, z31.s, z16.s\n" + "fmax z6.s, p3/M, z6.s, z11.s\n" + "fmin z3.s, p3/M, z3.s, z12.s\n" + "fmin z2.s, p3/M, z2.s, z12.s\n" + "fmax z5.s, p3/M, z5.s, z11.s\n" + "fmin z1.s, p3/M, z1.s, z12.s\n" + "fmin z0.s, p3/M, z0.s, z12.s\n" + "fmin z31.s, p3/M, z31.s, z12.s\n" + "fmax z4.s, p3/M, z4.s, z11.s\n" + ".inst 0x658aacd0 // bfcvt z16.h, p3/M, z6.s\n" + "fmax z3.s, p3/M, z3.s, z11.s\n" + "fmax z2.s, p3/M, z2.s, z11.s\n" + ".inst 0x658aacb1 // bfcvt z17.h, p3/M, z5.s\n" + "fmax z1.s, p3/M, z1.s, z11.s\n" + "fmax z0.s, p3/M, z0.s, z11.s\n" + "fmax z31.s, p3/M, z31.s, z11.s\n" + "st1h { z16.s }, p1, [x10, #1, MUL VL]\n" + ".inst 0x658aac90 // bfcvt z16.h, p3/M, z4.s\n" + "st1h { z17.s }, p1, [x28, #1, MUL VL]\n" + ".inst 0x658aac75 // bfcvt z21.h, p3/M, z3.s\n" + ".inst 0x658aac52 // bfcvt z18.h, p3/M, z2.s\n" + ".inst 0x658aac31 // bfcvt z17.h, p3/M, z1.s\n" + ".inst 0x658aac14 // bfcvt z20.h, p3/M, z0.s\n" + "st1h { z16.s }, p1, [x27, #1, MUL VL]\n" + ".inst 0x658aaff3 // bfcvt z19.h, p3/M, z31.s\n" + "ld1h { z16.s }, p0/Z, [x10, #2, MUL VL]\n" + "st1h { z21.s }, p1, [x26, #1, MUL VL]\n" + "st1h { z18.s }, p1, [x25, #1, MUL VL]\n" + "ld1h { z18.s }, p0/Z, [x28, #2, MUL VL]\n" + "st1h { z17.s }, p1, [x24, #1, MUL VL]\n" + "ld1h { z17.s }, p0/Z, [x27, #2, MUL VL]\n" + "st1h { z20.s }, p1, [x23, #1, MUL VL]\n" + "ld1h { z20.s }, p0/Z, [x26, #2, MUL VL]\n" + "lsl z16.s, z16.s, #0x10\n" + "st1h { z19.s }, p1, [x22, #1, MUL VL]\n" + "ld1h { z19.s }, p0/Z, [x25, #2, MUL VL]\n" + "lsl z22.s, z18.s, #0x10\n" + "ld1h { z18.s }, p0/Z, [x24, #2, MUL VL]\n" + "lsl z21.s, z17.s, #0x10\n" + "ld1h { z17.s }, p0/Z, [x23, #2, MUL VL]\n" + "lsl z20.s, z20.s, #0x10\n" + "fadd z30.s, z30.s, z16.s\n" + "ld1h { z16.s }, p0/Z, [x22, #2, MUL VL]\n" + "lsl z19.s, z19.s, #0x10\n" + "lsl z18.s, z18.s, #0x10\n" + "fadd z29.s, z29.s, z22.s\n" + "lsl z17.s, z17.s, #0x10\n" + "fadd z28.s, z28.s, z21.s\n" + "lsl z16.s, z16.s, #0x10\n" + "fadd z27.s, z27.s, z20.s\n" + "fmin z30.s, p3/M, z30.s, z12.s\n" + "fadd z26.s, z26.s, z19.s\n" + "fadd z25.s, z25.s, z18.s\n" + "fmin z29.s, p3/M, z29.s, z12.s\n" + "fadd z24.s, z24.s, z17.s\n" + "fmin z28.s, p3/M, z28.s, z12.s\n" + "fadd z23.s, z23.s, z16.s\n" + "fmin z27.s, p3/M, z27.s, z12.s\n" + "fmax z30.s, p3/M, z30.s, z11.s\n" + "fmin z26.s, p3/M, z26.s, z12.s\n" + "fmin z25.s, p3/M, z25.s, z12.s\n" + "fmax z29.s, p3/M, z29.s, z11.s\n" + "fmin z24.s, p3/M, z24.s, z12.s\n" + "fmin z23.s, p3/M, z23.s, z12.s\n" + "fmax z28.s, p3/M, z28.s, z11.s\n" + "fmax z27.s, p3/M, z27.s, z11.s\n" + ".inst 0x658aafd0 // bfcvt z16.h, p3/M, z30.s\n" + "fmax z26.s, p3/M, z26.s, z11.s\n" + "fmax z25.s, p3/M, z25.s, z11.s\n" + ".inst 0x658aafb1 // 
bfcvt z17.h, p3/M, z29.s\n" + "fmax z24.s, p3/M, z24.s, z11.s\n" + "fmax z23.s, p3/M, z23.s, z11.s\n" + "st1h { z16.s }, p0, [x10, #2, MUL VL]\n" + ".inst 0x658aaf90 // bfcvt z16.h, p3/M, z28.s\n" + ".inst 0x658aaf74 // bfcvt z20.h, p3/M, z27.s\n" + "inch x10, ALL, MUL #3\n" + "st1h { z17.s }, p0, [x28, #2, MUL VL]\n" + "inch x28, ALL, MUL #3\n" + ".inst 0x658aaf53 // bfcvt z19.h, p3/M, z26.s\n" + ".inst 0x658aaf32 // bfcvt z18.h, p3/M, z25.s\n" + "st1h { z16.s }, p0, [x27, #2, MUL VL]\n" + "inch x27, ALL, MUL #3\n" + ".inst 0x658aaf11 // bfcvt z17.h, p3/M, z24.s\n" + ".inst 0x658aaef0 // bfcvt z16.h, p3/M, z23.s\n" + "st1h { z20.s }, p0, [x26, #2, MUL VL]\n" + "inch x26, ALL, MUL #3\n" + "st1h { z19.s }, p0, [x25, #2, MUL VL]\n" + "inch x25, ALL, MUL #3\n" + "st1h { z18.s }, p0, [x24, #2, MUL VL]\n" + "inch x24, ALL, MUL #3\n" + "st1h { z17.s }, p0, [x23, #2, MUL VL]\n" + "inch x23, ALL, MUL #3\n" + "st1h { z16.s }, p0, [x22, #2, MUL VL]\n" + "inch x22, ALL, MUL #3\n" + "bgt 51b\n" + "subs %x[rows], %x[rows], #0x8\n" + "add %x[out_ptr], %x[out_ptr], x12\n" + "bgt 35b\n" + "52:" // Exit + : [in_ptr] "+&r" (in_ptr), [out_ptr] "+&r" (out_ptr), [rows] "+&r" (rows) + : [accumulate] "r" (accumulate), [bias] "r" (bias), [cols] "r" (cols), [ldout] "r" (ldout), [maxval] "r" (maxval), [minval] "r" (minval) + : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); +} + +#endif // ARM_COMPUTE_ENABLE_SVE diff --git a/src/cpu/operators/CpuMatMul.cpp b/src/cpu/operators/CpuMatMul.cpp index 89087129c3..f68ae9883f 100644 --- a/src/cpu/operators/CpuMatMul.cpp +++ b/src/cpu/operators/CpuMatMul.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023 Arm Limited. + * Copyright (c) 2023-2024 Arm Limited. 
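Note for reviewers (illustrative, not part of the patch): each lane of the generated accumulate path above performs the same four steps — widen the previously stored bf16 value into the top half of an fp32, add the fp32 accumulator, clamp to [minval, maxval], and narrow back with BFCVT. A scalar C++ sketch of that per-lane math, assuming the usual bf16 widening and round-to-nearest-even narrowing (the helper names are made up):

#include <algorithm>
#include <cstdint>
#include <cstring>

static float bf16_to_f32(uint16_t v)
{
    uint32_t bits = static_cast<uint32_t>(v) << 16; // matches "lsl z.s, z.s, #0x10"
    float    f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}

static uint16_t f32_to_bf16(float f) // stand-in for the BFCVT instruction (ignores NaN handling)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    bits += 0x7FFFu + ((bits >> 16) & 1); // round to nearest, ties to even
    return static_cast<uint16_t>(bits >> 16);
}

// One lane of the accumulate/merge: out = clamp(bf16(out) + acc), re-narrowed.
uint16_t merge_lane(uint16_t out, float acc, float minval, float maxval)
{
    return f32_to_bf16(std::min(std::max(bf16_to_f32(out) + acc, minval), maxval));
}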
 *
 * SPDX-License-Identifier: MIT
 *
@@ -102,8 +102,8 @@ Status CpuMatMul::validate(const ITensorInfo *lhs,
                            const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(lhs, rhs, dst);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16, DataType::QASYMM8,
-                                                         DataType::QASYMM8_SIGNED);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F32, DataType::F16, DataType::BFLOAT16,
+                                                         DataType::QASYMM8, DataType::QASYMM8_SIGNED);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->are_values_constant(), "LHS Tensor must be dynamic.");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(rhs->are_values_constant(), "RHS Tensor must be dynamic.");
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(lhs);
@@ -120,6 +120,7 @@ Status CpuMatMul::validate(const ITensorInfo *lhs,
     auto gemm_info            = AsmGemmInfo();
     gemm_info.activation_info = act_info;
     gemm_info.fast_mode       = settings.fast_math();
+    gemm_info.fixed_format    = settings.fixed_format();

     // Validate and then permute a/b
     if (adj_lhs)
@@ -157,6 +158,14 @@ Status CpuMatMul::validate(const ITensorInfo *lhs,
                                                       gemm_info.activation_info, gemm_info.output_stage));
     }

+    if (gemm_info.fixed_format)
+    {
+        gemm_info.weight_format                          = WeightFormat::ANY;
+        arm_compute::WeightFormat expected_weight_format = WeightFormat::ANY;
+        ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuGemmAssemblyDispatch::has_opt_impl(expected_weight_format, lhs_to_use,
+                                                                               rhs_to_use, nullptr, dst, gemm_info));
+    }
+
     cpu::CpuGemmAssemblyDispatch::validate(lhs_to_use, rhs_to_use, nullptr, dst, gemm_info);

     return Status{};
@@ -221,6 +230,7 @@ void CpuMatMul::configure(ITensorInfo *lhs,
     // Fill AsmGemmInfo class object before configuration
     _gemm_info.activation_info = act_info;
     _gemm_info.fast_mode       = settings.fast_math();
+    _gemm_info.fixed_format    = settings.fixed_format();
     _gemm_info.negated_offsets = false;

     lhs_to_use = (_adj_lhs) ? _lhs_transposed : lhs_to_use;
@@ -233,6 +243,18 @@ void CpuMatMul::configure(ITensorInfo *lhs,
                                                _gemm_info.output_stage);
     }

+    if (_gemm_info.fixed_format)
+    {
+        _gemm_info.weight_format                         = WeightFormat::ANY;
+        arm_compute::WeightFormat expected_weight_format = WeightFormat::ANY;
+        ARM_COMPUTE_ERROR_THROW_ON(cpu::CpuGemmAssemblyDispatch::has_opt_impl(expected_weight_format, &lhs_to_use,
+                                                                              &rhs_to_use, nullptr, dst, _gemm_info));
+        // Set gemm weights info to the one returned by has_opt_impl
+        _gemm_info.weight_format = expected_weight_format;
+        // has_opt_impl may return a non fast math kernel, even if we requested one
+        _gemm_info.fast_mode = arm_compute::is_fixed_format_fast_math(expected_weight_format);
+    }
+
     // Configure Asm Kernel
     _asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
     _asm_glue->configure(&lhs_to_use, &rhs_to_use, nullptr, &dst_to_use,
diff --git a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
index 58ee68fd49..efe2a7a67e 100644
--- a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
+++ b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
@@ -581,7 +581,6 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::prepare(ITensorPack &tensors)
         // Fixed format kernels need no pretranspose.
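Usage-side note (a minimal sketch, not from this patch): with the new fixed_format setting, a caller opts in and CpuMatMul negotiates the concrete weight format internally via has_opt_impl(), as wired up above. The tensor shapes, the missing fills and the explicitly spelled-out default arguments are placeholder assumptions:

#include "arm_compute/runtime/NEON/functions/NEMatMul.h"

using namespace arm_compute;

void run_fixed_format_matmul()
{
    Tensor lhs, rhs, dst;
    lhs.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::BFLOAT16));
    rhs.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::BFLOAT16));
    dst.allocator()->init(TensorInfo(TensorShape(2U, 2U), 1, DataType::BFLOAT16));

    // The two settings this patch uses: fast math plus fixed-format weights.
    const CpuMatMulSettings settings = CpuMatMulSettings().fast_math(true).fixed_format(true);

    NEMatMul mm;
    mm.configure(&lhs, &rhs, &dst, MatMulInfo(), settings, ActivationLayerInfo());

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill lhs/rhs with pre-reordered bf16 data, then:
    mm.run();
}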
         ARM_COMPUTE_ERROR_ON(arm_compute::is_fixed_format(
             assembly_utils::map_to_arm_compute_weight_format(_gemm_kernel_asm->get_config().weight_format)));
-
         const int  ldb = b_to_use->info()->strides_in_bytes().y() / b_to_use->info()->element_size();
         const auto in1_ptr =
             reinterpret_cast<const TypeInput *>(b_to_use->buffer() + b_to_use->info()->offset_first_element_in_bytes());
@@ -857,6 +856,7 @@ Status CpuGemmAssemblyDispatch::has_opt_impl(arm_compute::WeightFormat &expected
     arm_gemm::WeightFormat arm_gemm_expected_wf = assembly_utils::map_to_arm_gemm_weight_format(expected_weight_format);
     arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, act, num_threads,
                             info.fixed_format, info.fast_mode, &cfg);
+    // TODO: Incorporate info.transpose_b COMPMID-6595
     switch (a->data_type())
     {
@@ -900,9 +900,18 @@ Status CpuGemmAssemblyDispatch::has_opt_impl(arm_compute::WeightFormat &expected
 #if defined(ARM_COMPUTE_ENABLE_BF16)
         case DataType::BFLOAT16:
         {
-            ARM_COMPUTE_RETURN_ERROR_ON_MSG(
-                !(arm_gemm::has_opt_gemm<bfloat16, float, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
-                "We could not find an optimized kernel for BFLOAT16 input and F32 output");
+            if (d->data_type() == DataType::BFLOAT16)
+            {
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+                    !(arm_gemm::has_opt_gemm<bfloat16, bfloat16, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+                    "We could not find an optimized kernel for BFLOAT16 input and BFLOAT16 output");
+            }
+            else
+            {
+                ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+                    !(arm_gemm::has_opt_gemm<bfloat16, float, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+                    "We could not find an optimized kernel for BFLOAT16 input and F32 output");
+            }
             break;
         }
 #endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
@@ -958,8 +967,9 @@ Status CpuGemmAssemblyDispatch::validate(
                                     "Only F32 output supported for F32 input");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::F16 && d->data_type() != DataType::F16,
                                     "Only F16 output supported for F16 input");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 && d->data_type() != DataType::F32,
-                                    "Only F32 output supported for BFLOAT16 input");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::BFLOAT16 &&
+                                        (d->data_type() != DataType::F32 && d->data_type() != DataType::BFLOAT16),
+                                    "Only F32/BFLOAT16 output supported for BFLOAT16 input");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::U8 && d->data_type() != DataType::U32,
                                     "Only U32 output supported for U8 input");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32,
@@ -1030,7 +1040,14 @@ void CpuGemmAssemblyDispatch::configure(
 #endif /* __aarch64__ */
 #if defined(ARM_COMPUTE_ENABLE_BF16)
         case DataType::BFLOAT16:
-            create_arm_gemm<bfloat16, float>(_arm_gemm, a, b, c, d, act, info);
+            if (d->data_type() == DataType::BFLOAT16)
+            {
+                create_arm_gemm<bfloat16, bfloat16>(_arm_gemm, a, b, c, d, act, info);
+            }
+            else
+            {
+                create_arm_gemm<bfloat16, float>(_arm_gemm, a, b, c, d, act, info);
+            }
             break;
 #endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/support/Bfloat16.h b/support/Bfloat16.h
index 17013294e2..02772898a8 100644
--- a/support/Bfloat16.h
+++ b/support/Bfloat16.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2022 Arm Limited.
+ * Copyright (c) 2020-2022,2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -21,12 +21,12 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
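Aside on the new bfloat16::operator+=(float) added to support/Bfloat16.h below (a sketch under assumptions, not the patch's reference code): it lets test and reference code accumulate float products directly into a bf16 value, re-rounding after every step, which is what a bf16-output path needs when the stored value stays in bf16 precision. The helper name is hypothetical:

#include "support/Bfloat16.h"

// Hypothetical helper: accumulate into bf16, rounding each partial sum.
arm_compute::bfloat16 dot_bf16(const float *a, const float *b, int k)
{
    arm_compute::bfloat16 acc(0.f);
    for (int i = 0; i < k; ++i)
    {
        acc += a[i] * b[i]; // uses the operator+=(float) added below
    }
    return acc;
}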
 */
-#ifndef ARM_COMPUTE_BFLOAT16_H
-#define ARM_COMPUTE_BFLOAT16_H
+#ifndef ACL_SUPPORT_BFLOAT16_H
+#define ACL_SUPPORT_BFLOAT16_H
 
 #include <cstdint>
 #include <cstring>
-
+#include <ostream>
 namespace arm_compute
 {
 namespace
@@ -131,8 +131,16 @@ public:
         return val;
     }
 
+    bfloat16 &operator+=(float v)
+    {
+        value = float_to_bf16(bf16_to_float(value) + v);
+        return *this;
+    }
+
+    friend std::ostream &operator<<(std::ostream &os, const bfloat16 &arg);
+
 private:
     uint16_t value;
 };
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_BFLOAT16_H */
+#endif // ACL_SUPPORT_BFLOAT16_H
diff --git a/tests/SConscript b/tests/SConscript
index 305f1693d1..0907c5713b 100644
--- a/tests/SConscript
+++ b/tests/SConscript
@@ -1,7 +1,7 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 
-# Copyright (c) 2017-2023 Arm Limited.
+# Copyright (c) 2017-2023,2024 Arm Limited.
 #
 # SPDX-License-Identifier: MIT
 #
@@ -81,6 +81,9 @@ if 'macos' in test_env['os']:
     load_whole_archive = '-Wl,-force_load'
     noload_whole_archive = ''
 
+if (env['multi_isa']):
+    test_env.Append(CPPDEFINES=['ARM_COMPUTE_ENABLE_BF16'])
+
 if env['os'] in ['android', 'macos', 'bare_metal'] or env['standalone']:
     Import("arm_compute_a")
     Import("arm_compute_graph_a")
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 647adcdb69..e044620556 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2023,2024 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -27,6 +27,7 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/function_info/ActivationLayerInfo.h"
+
 #include "support/Half.h"
 #include "tests/Globals.h"
 #include "tests/SimpleTensor.h"
@@ -52,6 +53,10 @@ template <>
 struct is_floating_point<half> : public std::true_type
 {
 };
+template <>
+struct is_floating_point<bfloat16> : public std::true_type
+{
+};
 
 /** Helper struct to store the hints for
  * - destination quantization info
@@ -78,13 +83,13 @@ std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::Activation
 {
     std::pair<T, T> bounds;
 
-    switch(data_type)
+    switch (data_type)
     {
         case DataType::F16:
         {
             using namespace half_float::literal;
 
-            switch(activation)
+            switch (activation)
             {
                 case ActivationLayerInfo::ActivationFunction::TANH:
                 case ActivationLayerInfo::ActivationFunction::SQUARE:
@@ -104,7 +109,7 @@ std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::Activation
             break;
         }
         case DataType::F32:
-            switch(activation)
+            switch (activation)
             {
                 case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                     // Reduce range as exponent overflows
@@ -227,7 +232,8 @@ std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &
 * @param[in] max        Floating point maximum value to be quantized
 * @param[in] channel_id Channel id for per channel quantization info.
 */
-std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
+std::pair<int, int>
+get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
 
 /** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors.
 *  This is used in our validation suite in order to simulate implicit padding addition after configuring, but before allocating.
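Why the is_floating_point<bfloat16> specialization above is needed (illustrative): bfloat16 is a class type, so the standard trait reports false and templated validation helpers would otherwise take the non-floating-point path. A compile-time check of the distinction, assuming only the headers shown in this patch:

#include <type_traits>

#include "support/Bfloat16.h"
#include "tests/validation/Helpers.h"

static_assert(!std::is_floating_point<arm_compute::bfloat16>::value,
              "the std trait rejects class types such as bfloat16");
static_assert(arm_compute::test::validation::is_floating_point<arm_compute::bfloat16>::value,
              "the library trait opts bfloat16 into the floating-point code paths");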
@@ -238,7 +244,9 @@ std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo
 *
 * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
 */
-void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC, bool only_right_pad = false);
+void add_padding_x(std::initializer_list<ITensor *> tensors,
+                   const DataLayout                &data_layout = DataLayout::NHWC,
+                   bool                             only_right_pad = false);
 
 /** For 2d convolution, given the Lhs/Rhs matrix quantization informations and the convolution dimension,
 * calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
@@ -255,11 +263,11 @@ void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &d
 */
 QuantizationHint suggest_conv_dst_q_info_and_bias(const QuantizationInfo &in_q_info,
                                                   const QuantizationInfo &weight_q_info,
-                                                  int32_t height,
-                                                  int32_t width,
-                                                  int32_t channels,
-                                                  DataType data_type,
-                                                  float bias_fraction);
+                                                  int32_t                 height,
+                                                  int32_t                 width,
+                                                  int32_t                 channels,
+                                                  DataType                data_type,
+                                                  float                   bias_fraction);
 
 /** For a matrix multiplication, given the Lhs/Rhs matrix quantization informations and the matrix multiplication dimensions,
 * calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
@@ -275,8 +283,12 @@ QuantizationHint suggest_conv_dst_q_info_and_bias(const QuantizationInfo &in_q_i
 * @return QuantizationHint object containing the suggested output quantization info and min/max bias range
 */
 QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info,
-                                                    const QuantizationInfo &rhs_q_info, int32_t m, int32_t n, int32_t k, DataType data_type,
-                                                    float bias_fraction);
+                                                    const QuantizationInfo &rhs_q_info,
+                                                    int32_t                 m,
+                                                    int32_t                 n,
+                                                    int32_t                 k,
+                                                    DataType                data_type,
+                                                    float                   bias_fraction);
 
 /** For a multiply-accumulate (mac), given the Lhs/Rhs vector quantization informations and the dot product dimensions,
 * calculate a suitable output quantization and suggested bias range for obtaining non-saturated outputs with high probability.
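For context on the suggest_*_dst_q_info_and_bias() helpers reflowed above, here is a back-of-envelope version of the kind of hint they compute (illustrative only; the real heuristic lives in the corresponding .cpp and this function name is made up). For a quantized matmul over inner dimension k of roughly zero-mean data, accumulator magnitude grows about as sqrt(k), so the destination scale should grow accordingly:

#include <cmath>

#include "arm_compute/core/QuantizationInfo.h"

// Naive stand-in, not the library's actual formula.
arm_compute::QuantizationInfo
naive_matmul_dst_q_info(const arm_compute::QuantizationInfo &lhs, const arm_compute::QuantizationInfo &rhs, int k)
{
    const float scale = lhs.uniform().scale * rhs.uniform().scale * std::sqrt(static_cast<float>(k));
    return arm_compute::QuantizationInfo(scale, 0);
}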
@@ -291,8 +303,11 @@ QuantizationHint suggest_matmul_dst_q_info_and_bias(const QuantizationInfo &lhs_ * @return QuantizationHint object containing the suggested output quantization info and min/max bias range */ QuantizationHint suggest_mac_dst_q_info_and_bias(const QuantizationInfo &lhs_q_info, - const QuantizationInfo &rhs_q_info, int32_t k, DataType data_type, float bias_fraction, - int num_sd = 2); + const QuantizationInfo &rhs_q_info, + int32_t k, + DataType data_type, + float bias_fraction, + int num_sd = 2); } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/NEON/MatMul.cpp b/tests/validation/NEON/MatMul.cpp index f91dea1b4f..02f0bfda1e 100644 --- a/tests/validation/NEON/MatMul.cpp +++ b/tests/validation/NEON/MatMul.cpp @@ -24,15 +24,14 @@ #include "arm_compute/core/Types.h" #include "arm_compute/runtime/NEON/functions/NEMatMul.h" -#include "tests/NEON/Accessor.h" -#include "tests/framework/Asserts.h" -#include "tests/framework/Macros.h" -#include "tests/framework/datasets/Datasets.h" -#include "tests/validation/Validation.h" - #include "tests/datasets/LargeMatMulDataset.h" #include "tests/datasets/SmallMatMulDataset.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/framework/Macros.h" +#include "tests/NEON/Accessor.h" #include "tests/validation/fixtures/MatMulFixture.h" +#include "tests/validation/Validation.h" namespace arm_compute { @@ -45,8 +44,9 @@ using framework::dataset::make; TEST_SUITE(NEON) TEST_SUITE(MatMul) -constexpr AbsoluteTolerance tolerance_fp32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */ -const AbsoluteTolerance tolerance_fp16(half(0.1f)); +constexpr AbsoluteTolerance tolerance_fp32( + 0.001f); /**< Tolerance value for comparing reference's output against implementation's output for FP32 data types */ +const AbsoluteTolerance tolerance_fp16(half(0.1f)); #ifdef __aarch64__ constexpr AbsoluteTolerance tolerance_qasymm8(1); constexpr AbsoluteTolerance tolerance_qasymm8_signed(1); @@ -120,55 +120,79 @@ template using NEMatMulFastMathFixture = MatMulGenericValidationFixture; template -using NEMatMulDynamicTensorsFixture = MatMulValidationWithDynamicTensorsFixture; +using NEMatMulFixedFormatFixture = MatMulFixedFormatFixture; + +template +using NEMatMulDynamicTensorsFixture = + MatMulValidationWithDynamicTensorsFixture; template using NEQuantizedMatMulFixture = QuantizedMatMulValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F32), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }))) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEMatMulFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F32), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::LargeMatMulDataset(), - make("TransposeA", { false, true 
}), - make("TransposeB", { false, true }), - make("DataType", DataType::F32), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }))) +FIXTURE_DATA_TEST_CASE(RunLarge, + NEMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F32), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); } -FIXTURE_DATA_TEST_CASE(RunHighDimensions, NEMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::HighDimensionalMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F32), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }))) +FIXTURE_DATA_TEST_CASE(RunHighDimensions, + NEMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::HighDimensionalMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F32), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); } -FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F32), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), - make("NumberOfRuns", 5))) +FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, + NEMatMulDynamicTensorsFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F32), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}), +make("NumberOfRuns", 5))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp32); @@ -179,37 +203,58 @@ TEST_SUITE_END() // FP32 /* Note : MatMul BF16 is enabled by specifying FP32 datatype and enabling the fast math setting */ constexpr AbsoluteTolerance tolerance_bf16(0.02f); TEST_SUITE(BF16) -FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFastMathFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F32), - make("ActivationInfo", { ActivationLayerInfo() }), - make("RunTimes", { 0 }), - make("Settings", { CpuMatMulSettings().fast_math(true) }), - make("LhsQInfo", { QuantizationInfo() }), - make("RhsQInfo", { QuantizationInfo() }), - make("OutQInfo", { QuantizationInfo() })) -) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEMatMulFastMathFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F32), + make("ActivationInfo", {ActivationLayerInfo()}), + make("RunTimes", {0}), + make("Settings", {CpuMatMulSettings().fast_math(true)}), + 
make("LhsQInfo", {QuantizationInfo()}), + make("RhsQInfo", {QuantizationInfo()}), + make("OutQInfo", {QuantizationInfo()}))) { // Validate output validate(Accessor(_target), _reference, tolerance_bf16); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFastMathFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::LargeMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F32), - make("ActivationInfo", { ActivationLayerInfo() }), - make("RunTimes", { 0 }), - make("Settings", { CpuMatMulSettings().fast_math(true) }), - make("LhsQInfo", { QuantizationInfo() }), - make("RhsQInfo", { QuantizationInfo() }), - make("OutQInfo", { QuantizationInfo() })) -) +FIXTURE_DATA_TEST_CASE(RunTinyFixedFormat, + NEMatMulFixedFormatFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::TinyMatMulDataset(), + make("TransposeA", {false}), + make("TransposeB", {false}), + make("DataType", DataType::BFLOAT16), + make("ActivationInfo", {ActivationLayerInfo()}), + make("RunTimes", {0}), + make("Settings", {CpuMatMulSettings().fast_math(true).fixed_format(true)}), + make("LhsQInfo", {QuantizationInfo()}), + make("RhsQInfo", {QuantizationInfo()}), + make("OutQInfo", {QuantizationInfo()}))) +{ + if (CPUInfo::get().has_bf16()) + { + // Validate output + validate(Accessor(_target), _reference, tolerance_bf16); + } +} + +FIXTURE_DATA_TEST_CASE(RunLarge, + NEMatMulFastMathFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F32), + make("ActivationInfo", {ActivationLayerInfo()}), + make("RunTimes", {0}), + make("Settings", {CpuMatMulSettings().fast_math(true)}), + make("LhsQInfo", {QuantizationInfo()}), + make("RhsQInfo", {QuantizationInfo()}), + make("OutQInfo", {QuantizationInfo()}))) { // Validate output validate(Accessor(_target), _reference, tolerance_bf16, 0.01 /* tolerance_num */); @@ -219,36 +264,51 @@ TEST_SUITE_END() // BF16 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, NEMatMulFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F16), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }))) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEMatMulFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F16), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::LargeMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F16), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }))) +FIXTURE_DATA_TEST_CASE(RunLarge, + NEMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F16), + 
make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); } -FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, NEMatMulDynamicTensorsFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::F16), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), - make("NumberOfRuns", 5))) +FIXTURE_DATA_TEST_CASE(RunStressDynamicTensors, + NEMatMulDynamicTensorsFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::F16), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}), +make("NumberOfRuns", 5))) { // Validate output validate(Accessor(_target), _reference, tolerance_fp16); @@ -263,52 +323,64 @@ TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) -FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::QASYMM8), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), - make("NumberOfExtraRuns", { 0, 1 }), - make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }), - make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }), - make("OutQInfo", { QuantizationInfo(1.f, 2) })) -) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEQuantizedMatMulFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::QASYMM8), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}), +make("NumberOfExtraRuns", {0, 1}), +make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}), +make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}), +make("OutQInfo", {QuantizationInfo(1.f, 2)}))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::SmallerMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::QASYMM8), - make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }), - make("NumberOfExtraRuns", { 0, 1 }), - make("LhsQInfo", { QuantizationInfo(1.f / 50, 1) }), - make("RhsQInfo", { QuantizationInfo(1.f / 30, -1) }), - make("OutQInfo", { QuantizationInfo(1.f, 2) })) -) +FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, + NEQuantizedMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::SmallerMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::QASYMM8), + make("ActivationInfo", +{ + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) +}), 
+make("NumberOfExtraRuns", {0, 1}), +make("LhsQInfo", {QuantizationInfo(1.f / 50, 1)}), +make("RhsQInfo", {QuantizationInfo(1.f / 30, -1)}), +make("OutQInfo", {QuantizationInfo(1.f, 2)}))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::LargeMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::QASYMM8), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), - make("NumberOfExtraRuns", { 0, 1 }), - make("LhsQInfo", { QuantizationInfo(1.f / 100, 1) }), - make("RhsQInfo", { QuantizationInfo(1.f / 200, -1) }), - make("OutQInfo", { QuantizationInfo(1.f, 2) })) -) +FIXTURE_DATA_TEST_CASE(RunLarge, + NEQuantizedMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::QASYMM8), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}), +make("NumberOfExtraRuns", {0, 1}), +make("LhsQInfo", {QuantizationInfo(1.f / 100, 1)}), +make("RhsQInfo", {QuantizationInfo(1.f / 200, -1)}), +make("OutQInfo", {QuantizationInfo(1.f, 2)}))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -318,52 +390,64 @@ TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) -FIXTURE_DATA_TEST_CASE(RunSmall, NEQuantizedMatMulFixture, framework::DatasetMode::PRECOMMIT, - combine( - datasets::SmallMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::QASYMM8_SIGNED), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), - make("NumberOfExtraRuns", { 0, 1 }), - make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }), - make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }), - make("OutQInfo", { QuantizationInfo(1.f, 1) })) -) +FIXTURE_DATA_TEST_CASE(RunSmall, + NEQuantizedMatMulFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::QASYMM8_SIGNED), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}), +make("NumberOfExtraRuns", {0, 1}), +make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}), +make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}), +make("OutQInfo", {QuantizationInfo(1.f, 1)}))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_signed); } -FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::SmallerMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::QASYMM8_SIGNED), - make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) }), - make("NumberOfExtraRuns", { 0, 1 }), - make("LhsQInfo", { QuantizationInfo(1.f / 40, -2) }), - make("RhsQInfo", { QuantizationInfo(1.f / 50, 1) }), - make("OutQInfo", { QuantizationInfo(1.f, 1) })) -) +FIXTURE_DATA_TEST_CASE(RunSmallExtraActivation, + 
NEQuantizedMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::SmallerMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::QASYMM8_SIGNED), + make("ActivationInfo", +{ + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU) +}), +make("NumberOfExtraRuns", {0, 1}), +make("LhsQInfo", {QuantizationInfo(1.f / 40, -2)}), +make("RhsQInfo", {QuantizationInfo(1.f / 50, 1)}), +make("OutQInfo", {QuantizationInfo(1.f, 1)}))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_signed); } -FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::DatasetMode::NIGHTLY, - combine( - datasets::LargeMatMulDataset(), - make("TransposeA", { false, true }), - make("TransposeB", { false, true }), - make("DataType", DataType::QASYMM8_SIGNED), - make("ActivationInfo", { ActivationLayerInfo(), ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) }), - make("NumberOfExtraRuns", { 0, 1 }), - make("LhsQInfo", { QuantizationInfo(1.f / 150, -2) }), - make("RhsQInfo", { QuantizationInfo(1.f / 250, 1) }), - make("OutQInfo", { QuantizationInfo(1.f, 1) })) -) +FIXTURE_DATA_TEST_CASE(RunLarge, + NEQuantizedMatMulFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeMatMulDataset(), + make("TransposeA", {false, true}), + make("TransposeB", {false, true}), + make("DataType", DataType::QASYMM8_SIGNED), + make("ActivationInfo", +{ + ActivationLayerInfo(), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) +}), +make("NumberOfExtraRuns", {0, 1}), +make("LhsQInfo", {QuantizationInfo(1.f / 150, -2)}), +make("RhsQInfo", {QuantizationInfo(1.f / 250, 1)}), +make("OutQInfo", {QuantizationInfo(1.f, 1)}))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_signed); @@ -372,7 +456,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEQuantizedMatMulFixture, framework::Da TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE_END() // Quantized -#endif // __aarch64__ +#endif // __aarch64__ TEST_SUITE_END() // MatMul TEST_SUITE_END() // NEON diff --git a/tests/validation/fixtures/MatMulFixture.h b/tests/validation/fixtures/MatMulFixture.h index 2e79612a37..ffd12e56d0 100644 --- a/tests/validation/fixtures/MatMulFixture.h +++ b/tests/validation/fixtures/MatMulFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023 Arm Limited. + * Copyright (c) 2023-2024 Arm Limited. 
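Heads-up on the fixed-format fixture added further below in MatMulFixture.h: because no reorder reference implementation exists yet (see the TODO there), it hand-packs the 2x2 RHS with dst_idx = {0, 4, 1, 5}. In effect the two elements of each source column become adjacent and the second column starts four elements in. A stand-alone sketch of that index mapping (uint16_t stands in for the bf16 storage type):

#include <cstdint>

// Row-major 2x2 src [w00 w01; w10 w11] -> column 0 at dst[0..1], column 1 at dst[4..5].
void rearrange_2x2(const uint16_t *src, uint16_t *dst)
{
    const int dst_idx[] = {0, 4, 1, 5};
    for (int i = 0; i < 4; ++i)
    {
        dst[dst_idx[i]] = src[i];
    }
}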
* * SPDX-License-Identifier: MIT * @@ -27,15 +27,17 @@ #include "arm_compute/core/Types.h" #include "arm_compute/core/Utils.h" #include "arm_compute/core/utils/quantization/AsymmHelpers.h" + #include "src/core/utils/quantization/AsymmHelpers.h" #include "tests/framework/Asserts.h" // Required for ARM_COMPUTE_ASSERT #include "tests/framework/Fixture.h" -#include "tests/validation/Validation.h" #include "tests/validation/reference/ActivationLayer.h" #include "tests/validation/reference/GEMM.h" #include "tests/validation/reference/GEMMLowp.h" #include "tests/validation/reference/Permute.h" #include "tests/validation/reference/ReshapeLayer.h" +#include "tests/validation/Validation.h" + #include #include #include @@ -50,32 +52,50 @@ template void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f) { - switch(tensor.data_type()) + switch (tensor.data_type()) { + case DataType::BFLOAT16: + { + arm_compute::utils::uniform_real_distribution_16bit distribution{float(lo), float(hi)}; + library->fill(tensor, distribution, i); + break; + } case DataType::F16: { - arm_compute::utils::uniform_real_distribution_16bit distribution{ float(lo), float(hi) }; + arm_compute::utils::uniform_real_distribution_16bit distribution{float(lo), float(hi)}; library->fill(tensor, distribution, i); break; } @@ -98,8 +118,18 @@ protected: } } - TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type, - ActivationLayerInfo act_info, int num_extra_runs, const Settings &settings, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo) + virtual TensorType compute_target(const TensorShape &shape_a, + const TensorShape &shape_b, + const TensorShape &output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + const Settings &settings, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) { // 1. Create Classes and configure function // ---------------------------------------------------- @@ -137,7 +167,7 @@ protected: ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); // For multiple runs. - for(int i = 0; i < num_extra_runs; i++) + for (int i = 0; i < num_extra_runs; i++) { // Stress dynamic tensors by running multiple times. 
// -------------------------------------------------------- @@ -164,7 +194,12 @@ protected: template typename std::enable_if < !std::is_integral::value, SimpleTensor>::type - compute_reference_gemm(const SimpleTensor &a, const SimpleTensor &b, const SimpleTensor &c, float alpha, float beta, const QuantizationInfo &o_qinfo) + compute_reference_gemm(const SimpleTensor &a, + const SimpleTensor &b, + const SimpleTensor &c, + float alpha, + float beta, + const QuantizationInfo &o_qinfo) { ARM_COMPUTE_UNUSED(o_qinfo); @@ -173,7 +208,12 @@ protected: template typename std::enable_if::value, SimpleTensor>::type - compute_reference_gemm(const SimpleTensor &a, const SimpleTensor &b, const SimpleTensor &c, float alpha, float beta, const QuantizationInfo &o_qinfo) + compute_reference_gemm(const SimpleTensor &a, + const SimpleTensor &b, + const SimpleTensor &c, + float alpha, + float beta, + const QuantizationInfo &o_qinfo) { ARM_COMPUTE_UNUSED(alpha, beta); @@ -186,23 +226,30 @@ protected: int32_t output_multiplier = 0; int32_t output_shift = 0; quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift); - std::vector output_multipliers{ output_multiplier }; - std::vector output_shifts{ output_shift }; + std::vector output_multipliers{output_multiplier}; + std::vector output_shifts{output_shift}; //The lhs and rhs offsets are negated here to keep the reference aligned with the function implementation where the lhs and rhs offsets are also negated. - const auto tmp = reference::gemmlowp_matrix_multiply_core( - a, b, c.shape(), -aq.offset, -bq.offset); + const auto tmp = reference::gemmlowp_matrix_multiply_core(a, b, c.shape(), -aq.offset, -bq.offset); auto output = reference::gemmlowp_quantize_down_scale_by_fixedpoint( - tmp, output_multipliers, output_shifts, oq.offset, - std::numeric_limits::lowest(), std::numeric_limits::max()); + tmp, output_multipliers, output_shifts, oq.offset, std::numeric_limits::lowest(), + std::numeric_limits::max()); output.quantization_info(o_qinfo); return output; } - SimpleTensor compute_reference(const TensorShape &a_shape, const TensorShape &b_shape, const TensorShape &output_shape, bool transpose_a, bool transpose_b, DataType data_type, - ActivationLayerInfo act_info, QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo) + SimpleTensor compute_reference(const TensorShape &a_shape, + const TensorShape &b_shape, + const TensorShape &output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) { // We collapse dimensions > 2 onto dimension 2, i.e. 
4D+ tensors will look like 3D // This is necessary unless we choose to extend gemm reference for 4D+ tensors @@ -211,9 +258,9 @@ protected: TensorShape b_shape_collapsed = b_shape.collapsed_from(Window::DimZ); // Create reference - SimpleTensor a{ a_shape_collapsed, data_type, 1, a_qinfo }; - SimpleTensor b{ b_shape_collapsed, data_type, 1, b_qinfo }; - SimpleTensor c{ output_shape_collapsed, data_type, 1 }; + SimpleTensor a{a_shape_collapsed, data_type, 1, a_qinfo}; + SimpleTensor b{b_shape_collapsed, data_type, 1, b_qinfo}; + SimpleTensor c{output_shape_collapsed, data_type, 1}; // Fill reference fill(a, 2); @@ -234,16 +281,16 @@ protected: b_transposed_shape.set(1, b.shape().x()); // Define transposed tensors - SimpleTensor a_transposed{ a_transposed_shape, data_type }; - SimpleTensor b_transposed{ b_transposed_shape, data_type }; + SimpleTensor a_transposed{a_transposed_shape, data_type}; + SimpleTensor b_transposed{b_transposed_shape, data_type}; // pretranspose a if necessary - if(transpose_a) + if (transpose_a) { a_transposed = reference::permute(a, PermutationVector(1U, 0U)); } // pretranspose b if necessary - if(transpose_b) + if (transpose_b) { b_transposed = reference::permute(b, PermutationVector(1U, 0U)); } @@ -251,12 +298,13 @@ protected: // Setting beta to 0 will effectively disable C for the // computation of the reference: alpha * A * B + 0 * C // Use transposed tensors if boolean enabled else use original tensors - auto result = compute_reference_gemm((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, 1.0f, 0.f, o_qinfo); + auto result = compute_reference_gemm((transpose_a) ? a_transposed : a, (transpose_b) ? b_transposed : b, c, + 1.0f, 0.f, o_qinfo); result = reference::activation_layer(result, act_info, o_qinfo); // We reshape the gemm output back if the tensor is high dimensional - if(output_shape_collapsed != output_shape) + if (output_shape_collapsed != output_shape) { result = reference::reshape_layer(result, output_shape); } @@ -268,72 +316,293 @@ protected: SimpleTensor _reference{}; }; +/// TODO: (ONCPUML-1451) The current state of this fixture is interim and a longer-term testing method will be implemented later. +/// @note: Currently we support only a 2x2 test due to the lack of reorder ref. implementation. +template +class MatMulFixedFormatFixture + : public MatMulGenericValidationFixture +{ +public: + TensorType compute_target(const TensorShape &shape_a, + const TensorShape &shape_b, + const TensorShape &output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + const Settings &settings, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) override + { + // 1. 
Create Classes and configure function + // ---------------------------------------------------- + // Create tensors + // Configure relevant classes and matmul function + TensorType a = create_tensor(shape_a, data_type, 1, a_qinfo); + TensorType b = create_tensor(shape_b, data_type, 1, b_qinfo); + TensorType dst = create_tensor(output_shape, data_type, 1, o_qinfo); + + const auto weight_tensor_info = TensorInfo(*b.info()); + const TensorInfo new_tensor_info = prepare_weights(weight_tensor_info); + TensorType weights_transformed = create_tensor(new_tensor_info); + + // Configure MatMulInfo class + MatMulInfo mm_info; + mm_info.adj_lhs(transpose_a).adj_rhs(transpose_b); + + // Ensure values are dynamic + a.info()->set_are_values_constant(false); + b.info()->set_are_values_constant(false); + weights_transformed.info()->set_are_values_constant(false); + + FunctionType matmul; + + // Configure operator + matmul.configure(&a, &weights_transformed, &dst, mm_info, settings, act_info); + + // Assertions + ARM_COMPUTE_ASSERT(a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(weights_transformed.info()->is_resizable()); + + // Allocate tensors + a.allocator()->allocate(); + b.allocator()->allocate(); + dst.allocator()->allocate(); + weights_transformed.allocator()->allocate(); + + ARM_COMPUTE_ASSERT(!a.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!b.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!dst.info()->is_resizable()); + ARM_COMPUTE_ASSERT(!weights_transformed.info()->is_resizable()); + + // For multiple runs. + for (int i = 0; i < num_extra_runs; i++) + { + // Stress dynamic tensors by running multiple times. + // -------------------------------------------------------- + // Fill tensors with new seed + // Run function + const int seed_offset = num_extra_runs * 100; + this->fill(AccessorType(a), seed_offset); + this->fill(AccessorType(b), seed_offset + 1); + + matmul.run(); + } + + // 2. 
Final Run for reference comparison + // -------------------------------------------------------- + // Re-fill tensors same seed as reference run + // Compute MatMul operation + this->fill(AccessorType(a), 2); + this->fill(AccessorType(b), 3); + + rearrange_data(AccessorType(b), AccessorType(weights_transformed)); + + matmul.run(); + + return dst; + } + + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + Settings settings, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) + { + if (CPUInfo::get().has_bf16()) + { + MatMulGenericValidationFixture::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, settings, + a_qinfo, b_qinfo, o_qinfo); + } + } + +private: + TensorInfo prepare_weights(const TensorInfo tensor_info) + { + const DataLayout data_layout = tensor_info.data_layout(); + ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS); + const DataType data_type = tensor_info.data_type(); + const TensorShape tensor_shape = tensor_info.tensor_shape(); + const int H = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)]; + const int W = tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)]; + ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS); + + arm_compute::Strides strides_in_bytes = tensor_info.strides_in_bytes(); + strides_in_bytes.set(1, 32); + strides_in_bytes.set(2, 32); + + const size_t offset_first_element_in_bytes = tensor_info.offset_first_element_in_bytes(); + const size_t total_size_in_bytes = 32; + + const TensorShape TS(H, W); + + TensorInfo new_tensor_info = tensor_info; + new_tensor_info.init(TS, tensor_info.num_channels(), data_type, strides_in_bytes, offset_first_element_in_bytes, + total_size_in_bytes); + + return new_tensor_info; + } + + void rearrange_data(const AccessorType src, AccessorType dst) + { + const TensorShape src_tensor_shape = src.shape(); + const DataLayout data_layout = src.data_layout(); + ARM_COMPUTE_EXPECT(data_layout == DataLayout::NCHW, framework::LogLevel::ERRORS); + const unsigned int O = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES)]; // N=O + const unsigned int H = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT)]; + const unsigned int W = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH)]; + const unsigned int I = + src_tensor_shape[get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL)]; // C=I + ARM_COMPUTE_EXPECT(H <= 2 && W <= 2, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(I == 1 && O == 1, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(src.num_elements() <= dst.num_elements(), framework::LogLevel::ERRORS); + + const T *src_ptr = reinterpret_cast(src.data()); + T *dst_ptr = reinterpret_cast(dst.data()); + + // rearrange indexes for 2x2 input and weight + int dst_idx[] = {0, 4, 1, 5}; + for (int i = 0; i < 4; i++) + { + dst_ptr[dst_idx[i]] = src_ptr[i]; + } + } +}; + template -class MatMulValidationFixture : public MatMulGenericValidationFixture +class MatMulValidationFixture + : public MatMulGenericValidationFixture { public: - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, 
DataType data_type) + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type) { - MatMulGenericValidationFixture::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0, - Settings()); + MatMulGenericValidationFixture::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, ActivationLayerInfo(), 0, Settings()); } }; template -class MatMulValidationWithDynamicTensorsFixture : public MatMulGenericValidationFixture +class MatMulValidationWithDynamicTensorsFixture + : public MatMulGenericValidationFixture { public: - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs) + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs) { - MatMulGenericValidationFixture::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings()); + MatMulGenericValidationFixture::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings()); } }; template -class QuantizedMatMulValidationFixture : public MatMulGenericValidationFixture +class QuantizedMatMulValidationFixture + : public MatMulGenericValidationFixture { public: - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info, int num_extra_runs, - QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo) + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info, + int num_extra_runs, + QuantizationInfo a_qinfo, + QuantizationInfo b_qinfo, + QuantizationInfo o_qinfo) { - MatMulGenericValidationFixture::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(), - a_qinfo, b_qinfo, o_qinfo); + MatMulGenericValidationFixture::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(), + a_qinfo, b_qinfo, o_qinfo); } }; template -class MatMulValidationWithActivationFixture : public MatMulGenericValidationFixture +class MatMulValidationWithActivationFixture + : public MatMulGenericValidationFixture { public: - void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo act_info) + void setup(TensorShape shape_a, + TensorShape shape_b, + TensorShape output_shape, + bool transpose_a, + bool transpose_b, + DataType data_type, + ActivationLayerInfo act_info) { - MatMulGenericValidationFixture::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings()); + MatMulGenericValidationFixture::setup( + shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings()); } }; template -class MatMulValidationWithActivationAlphaBetaFixture : public MatMulGenericValidationFixture +class MatMulValidationWithActivationAlphaBetaFixture + : public MatMulGenericValidationFixture { public: - void setup(TensorShape shape_a, TensorShape shape_b, 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class MatMulValidationWithActivationAlphaBetaFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class MatMulValidationWithActivationAlphaBetaFixture
+    : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
 {
 public:
-    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
-               float alpha_beta)
+    void setup(TensorShape shape_a,
+               TensorShape shape_b,
+               TensorShape output_shape,
+               bool transpose_a,
+               bool transpose_b,
+               DataType data_type,
+               ActivationLayerInfo::ActivationFunction function,
+               float alpha_beta)
     {
         ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
-        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
+        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+            shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, 0, Settings());
     }
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename Settings, typename T>
-class QuantizedMatMulValidationWithActivationFixture : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
+class QuantizedMatMulValidationWithActivationFixture
+    : public MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>
 {
 public:
-    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool transpose_a, bool transpose_b, DataType data_type, ActivationLayerInfo::ActivationFunction function,
-               float alpha_beta, int num_extra_runs,
-               QuantizationInfo a_qinfo, QuantizationInfo b_qinfo, QuantizationInfo o_qinfo)
+    void setup(TensorShape shape_a,
+               TensorShape shape_b,
+               TensorShape output_shape,
+               bool transpose_a,
+               bool transpose_b,
+               DataType data_type,
+               ActivationLayerInfo::ActivationFunction function,
+               float alpha_beta,
+               int num_extra_runs,
+               QuantizationInfo a_qinfo,
+               QuantizationInfo b_qinfo,
+               QuantizationInfo o_qinfo)
     {
         ActivationLayerInfo act_info(function, alpha_beta, alpha_beta);
-        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
-                                                                                                   a_qinfo, b_qinfo, o_qinfo);
+        MatMulGenericValidationFixture<TensorType, AccessorType, FunctionType, Settings, T>::setup(
+            shape_a, shape_b, output_shape, transpose_a, transpose_b, data_type, act_info, num_extra_runs, Settings(),
+            a_qinfo, b_qinfo, o_qinfo);
     }
 };
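Note that the bf16 fixed-format fixture earlier only runs its body when CPUInfo::get().has_bf16() is true, so the new tests are silently skipped on cores without BF16. A sketch of the same runtime gate in user code (the include path is my assumption; the query itself is the one used in the fixture):

    // Runtime capability gate for bf16 kernels, as in the fixture's setup().
    #include <iostream>

    #include "arm_compute/core/CPP/CPPTypes.h" // assumed location of CPUInfo

    int main()
    {
        if (arm_compute::CPUInfo::get().has_bf16())
        {
            std::cout << "CPU reports BF16 support; bf16 matmul kernels can be used\n";
        }
        else
        {
            std::cout << "No BF16 support; an F32 kernel would be selected\n";
        }
        return 0;
    }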
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index 664b969125..2172362bdd 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020,2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,6 +24,7 @@
 #include "ActivationLayer.h"
 
 #include "arm_compute/core/Types.h"
+
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
@@ -40,7 +41,7 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
     ARM_COMPUTE_UNUSED(oq_info);
 
     // Create reference
-    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
+    SimpleTensor<T> dst{src.shape(), src.data_type(), 1};
 
     // Compute reference
     const T a(info.a());
@@ -48,7 +49,7 @@ SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo
 #if defined(_OPENMP)
 #pragma omp parallel for
 #endif /* _OPENMP */
-    for(int i = 0; i < src.num_elements(); ++i)
+    for (int i = 0; i < src.num_elements(); ++i)
     {
         dst[i] = activate_float<T>(src[i], a, b, info.activation());
     }
@@ -57,7 +58,8 @@
 }
 
 template <>
-SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<uint8_t>
+activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
 {
     const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -68,7 +70,8 @@ SimpleTensor<uint8_t> activation_layer(const SimpleTensor<uint8_t> &src
 }
 
 template <>
-SimpleTensor<int8_t> activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<int8_t>
+activation_layer<int8_t>(const SimpleTensor<int8_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
 {
     const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -79,7 +82,8 @@ SimpleTensor<int8_t> activation_layer(const SimpleTensor<int8_t> &src, A
 }
 
 template <>
-SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+SimpleTensor<int16_t>
+activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
 {
     const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
@@ -88,9 +92,14 @@ SimpleTensor<int16_t> activation_layer(const SimpleTensor<int16_t> &src
     SimpleTensor<int16_t> dst = convert_to_symmetric<int16_t>(dst_tmp, dst_qinfo);
     return dst;
 }
-template SimpleTensor<int32_t> activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
-template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
-template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<int32_t>
+activation_layer(const SimpleTensor<int32_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<float>
+activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<half>
+activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
+template SimpleTensor<bfloat16>
+activation_layer(const SimpleTensor<bfloat16> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
 } // namespace reference
 } // namespace validation
 } // namespace test
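The new bfloat16 instantiation above pushes bf16 values through the same float math as the other types; only the storage is narrower. A self-contained sketch of what that narrowing does to a leaky-ReLU result (plain truncation below; the library's Bfloat16.h rounds to nearest-even, so treat this as an approximation):

    // bf16 keeps the top 16 bits of an IEEE-754 float (sign, exponent,
    // 7 mantissa bits); this helper truncates instead of rounding.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static float bf16_roundtrip(float f)
    {
        std::uint32_t bits;
        std::memcpy(&bits, &f, sizeof(bits));
        bits &= 0xFFFF0000u; // drop the low 16 mantissa bits
        float out;
        std::memcpy(&out, &bits, sizeof(out));
        return out;
    }

    int main()
    {
        const float a = 0.1f; // leaky-ReLU slope
        for (float x : {-2.0f, -0.333333f, 0.0f, 1.234567f})
        {
            const float y = x > 0.0f ? x : a * x; // same formula as activate_float
            std::printf("x=%+.6f  leaky=%+.6f  as_bf16=%+.6f\n", x, y, bf16_roundtrip(y));
        }
        return 0;
    }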
diff --git a/tests/validation/reference/ActivationLayer.h b/tests/validation/reference/ActivationLayer.h
index a813ba5037..7f896bd696 100644
--- a/tests/validation/reference/ActivationLayer.h
+++ b/tests/validation/reference/ActivationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, 2022 Arm Limited.
+ * Copyright (c) 2017-2020,2022,2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
-#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_H
+#ifndef ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
+#define ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
 
 #include "tests/SimpleTensor.h"
 #include "tests/validation/Helpers.h"
@@ -40,7 +40,7 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
 {
     T ret;
 
-    switch(activation)
+    switch (activation)
     {
         case ActivationLayerInfo::ActivationFunction::ABS:
             ret = std::abs(x);
@@ -61,13 +61,13 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
             ret = std::min(a, std::max(b, x));
             break;
         case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
-            ret = (x > 0) ? x : a * x;
+            ret = x > static_cast<T>(0) ? x : static_cast<T>(a * x);
             break;
         case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
             ret = std::log(static_cast<T>(1) + std::exp(static_cast<T>(x)));
             break;
         case ActivationLayerInfo::ActivationFunction::ELU:
-            ret = (x > 0) ? x : a * (std::exp(x) - static_cast<T>(1));
+            ret = x > static_cast<T>(0) ? x : static_cast<T>(a * (std::exp(x) - static_cast<T>(1)));
             break;
         case ActivationLayerInfo::ActivationFunction::SQRT:
             ret = std::sqrt(x);
@@ -82,10 +82,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
             ret = x;
             break;
         case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
-            ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) * 0.166666667f);
+            ret = x * ((std::min(std::max(static_cast<T>(x + 3), static_cast<T>(0.0f)), static_cast<T>(6.0f))) *
+                       0.166666667f);
             break;
         case ActivationLayerInfo::ActivationFunction::SWISH:
-            ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a*x));
+            ret = static_cast<T>(x) / (static_cast<T>(1) + std::exp(-a * x));
             break;
         case ActivationLayerInfo::ActivationFunction::GELU:
             ret = x * 0.5f * (1 + erf(x / std::sqrt(2.0f)));
@@ -99,9 +100,11 @@ inline T activate_float(T x, T a, T b, ActivationLayerInfo::ActivationFunction a
 }
 
 template <typename T>
-SimpleTensor<T> activation_layer(const SimpleTensor<T> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info = QuantizationInfo());
+SimpleTensor<T> activation_layer(const SimpleTensor<T> &src,
+                                 ActivationLayerInfo info,
+                                 const QuantizationInfo &oq_info = QuantizationInfo());
 } // namespace reference
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_H */
+#endif // ACL_TESTS_VALIDATION_REFERENCE_ACTIVATIONLAYER_H
diff --git a/tests/validation/reference/DepthConvertLayer.cpp b/tests/validation/reference/DepthConvertLayer.cpp
index 1e4939129e..3f88897f8e 100644
--- a/tests/validation/reference/DepthConvertLayer.cpp
+++ b/tests/validation/reference/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020, 2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2023-2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -165,7 +165,7 @@ template SimpleTensor<half> depth_convert(const SimpleTensor<int32_t> &src, Data
 template SimpleTensor<float> depth_convert(const SimpleTensor<int32_t> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 
 // BFLOAT16
-template SimpleTensor<float> depth_convert(const SimpleTensor<bfloat16> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
+template SimpleTensor<bfloat16> depth_convert(const SimpleTensor<float> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
 
 // F16
 template SimpleTensor<uint8_t> depth_convert(const SimpleTensor<half> &src, DataType dt_out, ConvertPolicy policy, uint32_t shift);
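For a quick sanity check of the HARD_SWISH branch reformatted in the header above: it computes x * min(max(x + 3, 0), 6) / 6, with 1/6 spelled as 0.166666667f. A few reference points (float only; this checks the formula, not any kernel):

    // hard_swish(x) = x * min(max(x + 3, 0), 6) / 6
    #include <algorithm>
    #include <cstdio>

    static float hard_swish(float x)
    {
        return x * (std::min(std::max(x + 3.0f, 0.0f), 6.0f) * 0.166666667f);
    }

    int main()
    {
        // Expected: 0 for x <= -3, identity-like for x >= 3 (e.g. f(3) = 3).
        for (float x : {-4.0f, -3.0f, -1.0f, 0.0f, 1.0f, 3.0f, 5.0f})
        {
            std::printf("hard_swish(%+.1f) = %+.6f\n", x, hard_swish(x));
        }
        return 0;
    }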
diff --git a/tests/validation/reference/GEMM.cpp b/tests/validation/reference/GEMM.cpp
index f7e97e47b8..20f1139a02 100644
--- a/tests/validation/reference/GEMM.cpp
+++ b/tests/validation/reference/GEMM.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021,2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -35,10 +35,11 @@ namespace validation
 namespace reference
 {
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T>
+gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+    SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
 
     // Compute reference
     const int M = a.shape().y();
@@ -50,15 +51,22 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
     const int a_stride_z = K * M;
     const int a_stride_w = K * M * D;
 
-    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    int       b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+    const int b_stride_z =
+        b.shape().num_dimensions() > 2
+            ? N * K
+            : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
+    int b_stride_w =
+        b.shape().num_dimensions() > 3
+            ? K * N * D
+            : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
 
     // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
     // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
-    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+                                 c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
 
     // Batched-GEMM
-    if(is_batched_gemm)
+    if (is_batched_gemm)
     {
         b_stride_w = b_stride_z;
     }
@@ -69,21 +77,21 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
 #if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
 #pragma omp parallel for collapse(2)
 #endif /* _OPENMP */
-    for(int w = 0; w < W; ++w)
+    for (int w = 0; w < W; ++w)
     {
-        for(int depth = 0; depth < D; ++depth)
+        for (int depth = 0; depth < D; ++depth)
         {
             const int base_addr_a = depth * a_stride_z + w * a_stride_w;
             const int base_addr_b = depth * b_stride_z + w * b_stride_w;
             const int base_addr_c = depth * c_stride_z + w * c_stride_w;
 
-            for(int row = 0; row < M; ++row)
+            for (int row = 0; row < M; ++row)
             {
-                for(int col = 0; col < N; ++col)
+                for (int col = 0; col < N; ++col)
                 {
                     T acc(0);
 
-                    for(int k = 0; k < K; ++k)
+                    for (int k = 0; k < K; ++k)
                     {
                         acc += a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N];
                     }
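The stride logic just reformatted is the subtle part of the reference gemm: B only slides along dimensions it actually has, and in the batched-GEMM case the w-stride is aliased to the z-stride so each batch reads the next B slice. A standalone sketch with made-up sizes:

    // Stride selection for matrix B, reproduced outside the tensor
    // classes; N, K, D and the dimension counts are illustrative values.
    #include <cstdio>

    int main()
    {
        const int  N = 4, K = 8, D = 2;
        const int  b_num_dims      = 3;    // B carries batches in dim 2
        const bool is_batched_gemm = true; // a/c are 4D with dim 2 == 1

        const int b_stride_z = b_num_dims > 2 ? N * K : 0;
        int       b_stride_w = b_num_dims > 3 ? K * N * D : 0;
        if (is_batched_gemm)
        {
            b_stride_w = b_stride_z; // advance one B slice per batch
        }

        std::printf("b_stride_z=%d b_stride_w=%d\n", b_stride_z, b_stride_w); // 32 32
        return 0;
    }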
@@ -99,11 +107,12 @@ SimpleTensor<T> gemm(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const S
 }
 
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
+SimpleTensor<T> gemm_mixed_precision(
+    const SimpleTensor<T> &a, const SimpleTensor<T> &b, const SimpleTensor<T> &c, float alpha, float beta)
 {
     // GEMM mixed-precision combines F32 accumulators with F16 multiplications
     // Create reference
-    SimpleTensor<T> dst{ c.shape(), c.data_type(), 1 };
+    SimpleTensor<T> dst{c.shape(), c.data_type(), 1};
 
     // Compute reference
     const int M = a.shape().y();
@@ -115,15 +124,22 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
     const int a_stride_z = K * M;
     const int a_stride_w = K * M * D;
 
-    const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
-    int       b_stride_w = b.shape().num_dimensions() > 3 ? K * N * D : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
+    const int b_stride_z =
+        b.shape().num_dimensions() > 2
+            ? N * K
+            : 0; // Do not slide the matrix B along the 3th dimension in case matrix B has less than 3 dimensions
+    int b_stride_w =
+        b.shape().num_dimensions() > 3
+            ? K * N * D
+            : 0; // Do not slide the matrix B along the 4th dimension in case matrix B has less than 4 dimensions
 
     // Note: There are 3 gemm types: batched-gemm, multi-gemm, and batched of multi-gemms. The third dimension of tensor b is overloaded when tensor b has exactly 3 dimensions:
     // it can be either number of batches or multis. Batched-GEMM computation is detected only when the third dimension of "a" and "c" tensors is 1 and the number of dimensions is 4
-    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 && c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
+    const bool is_batched_gemm = b.shape().num_dimensions() == 3 && a.shape().num_dimensions() == 4 &&
+                                 c.shape().num_dimensions() == 4 && a.shape()[2] == 1 && c.shape()[2] == 1;
 
     // Batched-GEMM
-    if(is_batched_gemm)
+    if (is_batched_gemm)
     {
         b_stride_w = b_stride_z;
     }
@@ -134,27 +150,28 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
 #if defined(_OPENMP) && !(defined(__arm__) && defined(__ANDROID__))
 #pragma omp parallel for collapse(2)
 #endif /* _OPENMP */
-    for(int w = 0; w < W; ++w)
+    for (int w = 0; w < W; ++w)
     {
-        for(int depth = 0; depth < D; ++depth)
+        for (int depth = 0; depth < D; ++depth)
         {
             const int base_addr_a = depth * a_stride_z + w * a_stride_w;
             const int base_addr_b = depth * b_stride_z + w * b_stride_w;
             const int base_addr_c = depth * c_stride_z + w * c_stride_w;
 
-            for(int row = 0; row < M; ++row)
+            for (int row = 0; row < M; ++row)
             {
-                for(int col = 0; col < N; ++col)
+                for (int col = 0; col < N; ++col)
                 {
                     float acc(0);
 
-                    for(int k = 0; k < K; ++k)
+                    for (int k = 0; k < K; ++k)
                     {
                         acc += static_cast<float>(a[base_addr_a + k + row * K] * b[base_addr_b + col + k * N]);
                     }
 
                     // Finalize the result: alpha * A * B + beta * C
-                    dst[base_addr_c + col + row * N] = static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
+                    dst[base_addr_c + col + row * N] =
+                        static_cast<T>(alpha * acc + beta * c[base_addr_c + col + row * N]);
                 }
             }
         }
@@ -163,9 +180,17 @@ SimpleTensor<T> gemm_mixed_precision(const SimpleTensor<T> &a, const SimpleTenso
     return dst;
 }
 
-template SimpleTensor<float> gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
-template SimpleTensor<half> gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
-template SimpleTensor<half> gemm_mixed_precision(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<float>
+gemm(const SimpleTensor<float> &a, const SimpleTensor<float> &b, const SimpleTensor<float> &c, float alpha, float beta);
+template SimpleTensor<bfloat16> gemm(const SimpleTensor<bfloat16> &a,
+                                     const SimpleTensor<bfloat16> &b,
+                                     const SimpleTensor<bfloat16> &c,
+                                     float alpha,
+                                     float beta);
+template SimpleTensor<half>
+gemm(const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
+template SimpleTensor<half> gemm_mixed_precision(
+    const SimpleTensor<half> &a, const SimpleTensor<half> &b, const SimpleTensor<half> &c, float alpha, float beta);
 } // namespace reference
 } // namespace validation
 } // namespace test
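gemm_mixed_precision above differs from the plain gemm in exactly one respect: the inner products are accumulated in a float and only the final alpha * acc + beta * C is narrowed back to T. A compressed sketch of that pattern (narrow_t stands in for half; the new bfloat16 path goes through the plain gemm instead):

    // Wide-accumulator dot product: multiply in the narrow type,
    // accumulate in float, narrow once at the end.
    #include <cstdio>

    using narrow_t = float; // stand-in for half / bfloat16

    int main()
    {
        const narrow_t a[4]  = {0.1f, 0.2f, 0.3f, 0.4f};
        const narrow_t b[4]  = {1.0f, 2.0f, 3.0f, 4.0f};
        const float    alpha = 1.0f, beta = 0.0f, c = 0.0f;

        float acc = 0.0f;
        for (int k = 0; k < 4; ++k)
        {
            acc += static_cast<float>(a[k] * b[k]);
        }
        const narrow_t dst = static_cast<narrow_t>(alpha * acc + beta * c);
        std::printf("dst = %f\n", static_cast<double>(dst)); // 3.000000
        return 0;
    }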
diff --git a/tests/validation/reference/Permute.cpp b/tests/validation/reference/Permute.cpp
index 6f122b1bf5..7aa3011d8f 100644
--- a/tests/validation/reference/Permute.cpp
+++ b/tests/validation/reference/Permute.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 Arm Limited.
+ * Copyright (c) 2017-2019,2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,6 +24,7 @@
 #include "Permute.h"
 
 #include "arm_compute/core/Types.h"
+
 #include "tests/validation/Helpers.h"
 
 namespace arm_compute
@@ -42,11 +43,11 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
     permute(dst_shape, perm);
 
     // Create reference
-    SimpleTensor<T> dst{ dst_shape, src.data_type(), src.num_channels(), src.quantization_info() };
+    SimpleTensor<T> dst{dst_shape, src.data_type(), src.num_channels(), src.quantization_info()};
 
     // Compute reference
     const uint32_t num_elements = src.num_elements();
-    for(uint32_t i = 0; i < num_elements; ++i)
+    for (uint32_t i = 0; i < num_elements; ++i)
     {
         const Coordinates src_coords = index2coord(src.shape(), i);
         Coordinates       dst_coords = src_coords;
@@ -58,13 +59,14 @@ SimpleTensor<T> permute(const SimpleTensor<T> &src, PermutationVector perm)
     return dst;
 }
 
-template SimpleTensor<int8_t> permute(const SimpleTensor<int8_t> &src, PermutationVector perm);
-template SimpleTensor<uint8_t> permute(const SimpleTensor<uint8_t> &src, PermutationVector perm);
-template SimpleTensor<int16_t> permute(const SimpleTensor<int16_t> &src, PermutationVector perm);
+template SimpleTensor<int8_t>   permute(const SimpleTensor<int8_t> &src, PermutationVector perm);
+template SimpleTensor<uint8_t>  permute(const SimpleTensor<uint8_t> &src, PermutationVector perm);
+template SimpleTensor<int16_t>  permute(const SimpleTensor<int16_t> &src, PermutationVector perm);
 template SimpleTensor<uint16_t> permute(const SimpleTensor<uint16_t> &src, PermutationVector perm);
 template SimpleTensor<uint32_t> permute(const SimpleTensor<uint32_t> &src, PermutationVector perm);
-template SimpleTensor<float> permute(const SimpleTensor<float> &src, PermutationVector perm);
-template SimpleTensor<half> permute(const SimpleTensor<half> &src, PermutationVector perm);
+template SimpleTensor<float>    permute(const SimpleTensor<float> &src, PermutationVector perm);
+template SimpleTensor<half>     permute(const SimpleTensor<half> &src, PermutationVector perm);
+template SimpleTensor<bfloat16> permute(const SimpleTensor<bfloat16> &src, PermutationVector perm);
 } // namespace reference
 } // namespace validation
 } // namespace test
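The reference permute above moves whole elements by remapping coordinates, which is why adding bfloat16 only needs a new instantiation: the element type is opaque to the algorithm. The core index dance, reduced to a 2D transpose:

    // Coordinate remapping as in the reference permute, specialised to a
    // 2x3 -> 3x2 transpose (permutation vector (1, 0)).
    #include <cstdio>

    int main()
    {
        const int H = 2, W = 3;
        const int src[H * W] = {1, 2, 3, 4, 5, 6}; // row-major 2x3
        int       dst[W * H] = {};

        for (int y = 0; y < H; ++y)
        {
            for (int x = 0; x < W; ++x)
            {
                dst[x * H + y] = src[y * W + x]; // swap the two coordinates
            }
        }

        for (int i = 0; i < W * H; ++i)
        {
            std::printf("%d ", dst[i]); // 1 4 2 5 3 6
        }
        std::printf("\n");
        return 0;
    }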
diff --git a/tests/validation/reference/ReshapeLayer.cpp b/tests/validation/reference/ReshapeLayer.cpp
index daea001be6..30a58dd65b 100644
--- a/tests/validation/reference/ReshapeLayer.cpp
+++ b/tests/validation/reference/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 Arm Limited.
+ * Copyright (c) 2017,2024 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -44,14 +44,15 @@ SimpleTensor<T> reshape_layer(const SimpleTensor<T> &src, const TensorShape &out
     return dst;
 }
 
-template SimpleTensor<uint8_t> reshape_layer(const SimpleTensor<uint8_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int8_t> reshape_layer(const SimpleTensor<int8_t> &src, const TensorShape &output_shape);
+template SimpleTensor<uint8_t>  reshape_layer(const SimpleTensor<uint8_t> &src, const TensorShape &output_shape);
+template SimpleTensor<int8_t>   reshape_layer(const SimpleTensor<int8_t> &src, const TensorShape &output_shape);
 template SimpleTensor<uint16_t> reshape_layer(const SimpleTensor<uint16_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int16_t> reshape_layer(const SimpleTensor<int16_t> &src, const TensorShape &output_shape);
+template SimpleTensor<int16_t>  reshape_layer(const SimpleTensor<int16_t> &src, const TensorShape &output_shape);
 template SimpleTensor<uint32_t> reshape_layer(const SimpleTensor<uint32_t> &src, const TensorShape &output_shape);
-template SimpleTensor<int32_t> reshape_layer(const SimpleTensor<int32_t> &src, const TensorShape &output_shape);
-template SimpleTensor<float> reshape_layer(const SimpleTensor<float> &src, const TensorShape &output_shape);
-template SimpleTensor<half> reshape_layer(const SimpleTensor<half> &src, const TensorShape &output_shape);
+template SimpleTensor<int32_t>  reshape_layer(const SimpleTensor<int32_t> &src, const TensorShape &output_shape);
+template SimpleTensor<float>    reshape_layer(const SimpleTensor<float> &src, const TensorShape &output_shape);
+template SimpleTensor<half>     reshape_layer(const SimpleTensor<half> &src, const TensorShape &output_shape);
+template SimpleTensor<bfloat16> reshape_layer(const SimpleTensor<bfloat16> &src, const TensorShape &output_shape);
 /** [ReshapeLayer] **/
 } // namespace reference
 } // namespace validation
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 23e28d68a8..41ac11801f 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -3601,7 +3601,7 @@ inline ::std::ostream &operator<<(::std::ostream &os, const arm_compute::CpuMatM
 {
     os << "CpuMatMulSettings="
        << "["
-       << "fast_math=" << settings.fast_math() << "]";
+       << "fast_math=" << settings.fast_math() << ",fixed_format=" << settings.fixed_format() << "]";
     return os;
 }
-- 
cgit v1.2.1
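Taken together, the patch allows something like the following (a minimal sketch, not code from the tree: shapes are arbitrary, validation and error handling are omitted, the include list is approximate, and whether a bf16 kernel is actually selected still depends on CPUInfo::get().has_bf16() at runtime):

    // Hypothetical end-to-end use of NEMatMul with BFLOAT16 tensors.
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/function_info/MatMulInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEMatMul.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor lhs, rhs, dst;
        lhs.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::BFLOAT16));
        rhs.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::BFLOAT16));
        dst.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::BFLOAT16));

        NEMatMul matmul;
        matmul.configure(&lhs, &rhs, &dst, MatMulInfo(), CpuMatMulSettings(), ActivationLayerInfo());

        lhs.allocator()->allocate();
        rhs.allocator()->allocate();
        dst.allocator()->allocate();

        // Fill lhs/rhs here, then:
        matmul.run();
        return 0;
    }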