author    Gunes Bayir <gunes.bayir@arm.com>    2023-01-29 13:24:24 +0000
committer Gunes Bayir <gunes.bayir@arm.com>    2023-02-01 09:59:30 +0000
commit    ae72a46e495742863dba44fcf5fdc673c9d2afbc
tree      65bab43d0feddaa66b160ac7dc746651dc7c48de
parent    ec320d9fc418e2d95a3a38ce87233397535f467d
Add new operator AddMulAdd for Neon™ backend for Float/Quantized types
This is a fused operator that merges Add + Mul + Add [+ Relu-based-Activation] layers and has an intermediate output after the first Add. It is supported for the FP16/FP32/QASYMM8/QASYMM8_SIGNED data types. The subsequent Mul and Add are intended for scaling, and their coefficients are one-dimensional (per channel).

The inputs are
 - input1   : nD tensor [X, Y, Z, W, ..]
 - input2   : nD tensor [X, Y, Z, W, ..]
 - add_coef : 1D tensor [X]
 - mul_coef : 1D tensor [X]

The outputs are
 - out1 : nD tensor (intermediate output) [X, Y, Z, W, ..]
 - out2 : nD tensor (final output)        [X, Y, Z, W, ..]

The operation can be summarized as follows:
    out1 <- input1 + input2
    out2 <- Act(out1 * mul_coef + add_coef)

The activation function can be Identity, Relu, Bounded Relu or Lower/Upper Bounded Relu. The intermediate output can be skipped by providing a nullptr. This operator is provided so that residual-network patterns can be fused, saving computation by reducing memory traffic back and forth.

Resolves: COMPMID-5463

Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Change-Id: I8ef577aa623b036e9a9f655cc088493fd19a6109
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9055
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
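As an illustration of the semantics above, the fused computation is equivalent to the following scalar reference (a sketch only; the function and variable names are illustrative and not part of this patch, and the actual kernels below operate on Neon vectors row by row):

    // Scalar reference of the fused Add + Mul + Add [+ activation] computation.
    // 'act' stands in for the optional Relu-family activation.
    #include <cstddef>
    #include <functional>

    void add_mul_add_reference(const float *input1, const float *input2,
                               const float *mul_coef, const float *add_coef,
                               float *out1, float *out2,
                               size_t feature_dim, size_t outer_elems,
                               const std::function<float(float)> &act)
    {
        for (size_t i = 0; i < outer_elems; ++i)      // all dimensions above the first one, flattened
        {
            for (size_t x = 0; x < feature_dim; ++x)  // first (per-channel) dimension
            {
                const size_t idx = i * feature_dim + x;
                const float  sum = input1[idx] + input2[idx];     // first Add
                if (out1 != nullptr)
                {
                    out1[idx] = sum;                              // optional intermediate output
                }
                out2[idx] = act(sum * mul_coef[x] + add_coef[x]); // Mul + Add, then activation
            }
        }
    }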
-rw-r--r--  Android.bp                                                    7
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                        3
-rw-r--r--  arm_compute/runtime/NEON/functions/NEAddMulAdd.h            105
-rw-r--r--  filelist.json                                                15
-rw-r--r--  src/BUILD.bazel                                               7
-rw-r--r--  src/CMakeLists.txt                                            7
-rw-r--r--  src/cpu/kernels/CpuAddMulAddKernel.cpp                      204
-rw-r--r--  src/cpu/kernels/CpuAddMulAddKernel.h                         89
-rw-r--r--  src/cpu/kernels/addmuladd/generic/neon/fp16.cpp             949
-rw-r--r--  src/cpu/kernels/addmuladd/generic/neon/fp32.cpp             724
-rw-r--r--  src/cpu/kernels/addmuladd/generic/neon/qasymm8.cpp          834
-rw-r--r--  src/cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp   834
-rw-r--r--  src/cpu/kernels/addmuladd/list.h                             48
-rw-r--r--  src/cpu/operators/CpuAddMulAdd.cpp                          139
-rw-r--r--  src/cpu/operators/CpuAddMulAdd.h                             84
-rw-r--r--  src/runtime/NEON/functions/NEAddMulAdd.cpp                   82
-rw-r--r--  tests/validation/NEON/AddMulAdd.cpp                         230
-rw-r--r--  tests/validation/fixtures/AddMulAddFixture.h                268
18 files changed, 4628 insertions(+), 1 deletion(-)
diff --git a/Android.bp b/Android.bp
index 23b19c2862..3af432218b 100644
--- a/Android.bp
+++ b/Android.bp
@@ -394,6 +394,7 @@ cc_library_static {
"src/cpu/CpuTensor.cpp",
"src/cpu/kernels/CpuActivationKernel.cpp",
"src/cpu/kernels/CpuAddKernel.cpp",
+ "src/cpu/kernels/CpuAddMulAddKernel.cpp",
"src/cpu/kernels/CpuCastKernel.cpp",
"src/cpu/kernels/CpuCol2ImKernel.cpp",
"src/cpu/kernels/CpuConcatenateBatchKernel.cpp",
@@ -451,6 +452,10 @@ cc_library_static {
"src/cpu/kernels/add/generic/neon/qasymm8.cpp",
"src/cpu/kernels/add/generic/neon/qasymm8_signed.cpp",
"src/cpu/kernels/add/generic/neon/qsymm16.cpp",
+ "src/cpu/kernels/addmuladd/generic/neon/fp16.cpp",
+ "src/cpu/kernels/addmuladd/generic/neon/fp32.cpp",
+ "src/cpu/kernels/addmuladd/generic/neon/qasymm8.cpp",
+ "src/cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp",
"src/cpu/kernels/boundingboxtransform/generic/neon/fp16.cpp",
"src/cpu/kernels/boundingboxtransform/generic/neon/fp32.cpp",
"src/cpu/kernels/boundingboxtransform/generic/neon/impl.cpp",
@@ -551,6 +556,7 @@ cc_library_static {
"src/cpu/kernels/sub/neon/qsymm16.cpp",
"src/cpu/operators/CpuActivation.cpp",
"src/cpu/operators/CpuAdd.cpp",
+ "src/cpu/operators/CpuAddMulAdd.cpp",
"src/cpu/operators/CpuCast.cpp",
"src/cpu/operators/CpuConcatenate.cpp",
"src/cpu/operators/CpuConv2d.cpp",
@@ -873,6 +879,7 @@ cc_library_static {
"src/runtime/NEON/INESimpleFunction.cpp",
"src/runtime/NEON/INESimpleFunctionNoBorder.cpp",
"src/runtime/NEON/functions/NEActivationLayer.cpp",
+ "src/runtime/NEON/functions/NEAddMulAdd.cpp",
"src/runtime/NEON/functions/NEArgMinMaxLayer.cpp",
"src/runtime/NEON/functions/NEArithmeticAddition.cpp",
"src/runtime/NEON/functions/NEArithmeticSubtraction.cpp",
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index a679e8c04e..8dbe6c59c3 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2022 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_NEFUNCTIONS_H
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEAddMulAdd.h"
#include "arm_compute/runtime/NEON/functions/NEArgMinMaxLayer.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
#include "arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h"
diff --git a/arm_compute/runtime/NEON/functions/NEAddMulAdd.h b/arm_compute/runtime/NEON/functions/NEAddMulAdd.h
new file mode 100644
index 0000000000..28185f338f
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEAddMulAdd.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEADDMULADD
+#define ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEADDMULADD
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ITensor;
+class ITensorInfo;
+
+/** Function to compute Add+Mul+Add fused operation */
+class NEAddMulAdd : public IFunction
+{
+public:
+ /** Constructor */
+ NEAddMulAdd(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEAddMulAdd(const NEAddMulAdd &) = delete;
+ /** Prevent instances of this class from being moved (As this class contains non movable objects) */
+ NEAddMulAdd(NEAddMulAdd &&) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEAddMulAdd &operator=(const NEAddMulAdd &) = delete;
+ /** Prevent instances of this class from being moved (As this class contains non movable objects) */
+ NEAddMulAdd &operator=(NEAddMulAdd &&) = delete;
+ /** Destructor */
+ ~NEAddMulAdd();
+ /** Initialize the function's inputs and outputs.
+ *
+ * Valid data layouts:
+ * - Any
+ *
+ * Valid data type configurations:
+ * |input1 |input2 |bn_mul |bn_add |add_output |final_output |
+ * |:--------------|:--------------|:--------------|:--------------|:--------------|:--------------|
+ * |QASYMM8 |QASYMM8 |QASYMM8 |QASYMM8 |QASYMM8 |QASYMM8 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |QASYMM8_SIGNED |
+ * |F16 |F16 |F16 |F16 |F16 |F16 |
+ * |F32 |F32 |F32 |F32 |F32 |F32 |
+ *
+ * This is what this composite function (tailored for add followed by a batch norm operation) does:
+ * add_output <- input1 + input2 (add)
+ * final_output <- add_output * bn_mul + bn_add (batch norm = mul+add)
+ *
+ * @param[in] input1 First tensor input. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+ * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
+ * @param[in] bn_mul The multiplication coefficient on the feature dimension. Data types supported: Same as @p input1.
+ * It is a one-dimensional tensor with size equal to the number of feature maps [FM]
+ * @param[in] bn_add The addition coefficient on the feature dimension. Data types supported: Same as @p input1.
+ * It is a one-dimensional tensor with size equal to the number of feature maps [FM]
+ * @param[out] add_output Output of the first add. Data type supported: Same as @p input1.
+ * @param[out] final_output Output of the add+mul+add+act composite operation. Data type supported: Same as @p input1.
+ * @param[in] policy Policy to handle overflow
+ * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+ *
+ */
+ void configure(ITensor *input1, ITensor *input2, ITensor *bn_mul, ITensor *bn_add,
+ ITensor *add_output, ITensor *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info);
+ /** Static function to check if given info will lead to a valid configuration of @ref NEAddMulAdd
+ *
+ * Similar to @ref NEAddMulAdd::configure() except the arguments are @ref ITensorInfo * instead of @ref ITensor *
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ struct Impl;
+ std::unique_ptr<Impl> _impl;
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_RUNTIME_NEON_FUNCTIONS_NEADDMULADD */
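A minimal usage sketch of the interface above (the shapes, the chosen activation and the surrounding setup are illustrative only; error handling is omitted):

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"

    #include <initializer_list>

    using namespace arm_compute;

    int main()
    {
        // Illustrative shapes: inputs and outputs of shape [16, 8], coefficients of shape [16].
        Tensor input1, input2, bn_mul, bn_add, add_out, final_out;
        input1.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
        input2.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
        bn_mul.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
        bn_add.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
        add_out.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
        final_out.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));

        NEAddMulAdd add_mul_add{};
        add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add,
                              &add_out, // or nullptr to skip the intermediate output
                              &final_out,
                              ConvertPolicy::SATURATE,
                              ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        for (Tensor *t : { &input1, &input2, &bn_mul, &bn_add, &add_out, &final_out })
        {
            t->allocator()->allocate();
        }
        // ... fill input1, input2, bn_mul and bn_add ...
        add_mul_add.run();
        return 0;
    }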
diff --git a/filelist.json b/filelist.json
index aec4fa8188..ea75c4ada1 100644
--- a/filelist.json
+++ b/filelist.json
@@ -922,6 +922,21 @@
}
}
},
+ "AddMulAdd": {
+ "files": {
+ "common": [
+ "src/cpu/operators/CpuAddMulAdd.cpp",
+ "src/cpu/kernels/CpuAddMulAddKernel.cpp",
+ "src/runtime/NEON/functions/NEAddMulAdd.cpp"
+ ],
+ "neon": {
+ "fp32":["src/cpu/kernels/addmuladd/generic/neon/fp32.cpp"],
+ "fp16":["src/cpu/kernels/addmuladd/generic/neon/fp16.cpp"],
+ "qasymm8": ["src/cpu/kernels/addmuladd/generic/neon/qasymm8.cpp"],
+ "qasymm8_signed": ["src/cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp"]
+ }
+ }
+ },
"BatchNormalize": {
"files": {
"common": [
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index 93f7d1acb9..d8b0127179 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -653,6 +653,7 @@ filegroup(
"cpu/CpuTensor.cpp",
"cpu/kernels/CpuActivationKernel.cpp",
"cpu/kernels/CpuAddKernel.cpp",
+ "cpu/kernels/CpuAddMulAddKernel.cpp",
"cpu/kernels/CpuCastKernel.cpp",
"cpu/kernels/CpuCol2ImKernel.cpp",
"cpu/kernels/CpuConcatenateBatchKernel.cpp",
@@ -710,6 +711,10 @@ filegroup(
"cpu/kernels/add/generic/neon/qasymm8.cpp",
"cpu/kernels/add/generic/neon/qasymm8_signed.cpp",
"cpu/kernels/add/generic/neon/qsymm16.cpp",
+ "cpu/kernels/addmuladd/generic/neon/fp16.cpp",
+ "cpu/kernels/addmuladd/generic/neon/fp32.cpp",
+ "cpu/kernels/addmuladd/generic/neon/qasymm8.cpp",
+ "cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp",
"cpu/kernels/boundingboxtransform/generic/neon/fp16.cpp",
"cpu/kernels/boundingboxtransform/generic/neon/fp32.cpp",
"cpu/kernels/boundingboxtransform/generic/neon/impl.cpp",
@@ -810,6 +815,7 @@ filegroup(
"cpu/kernels/sub/neon/qsymm16.cpp",
"cpu/operators/CpuActivation.cpp",
"cpu/operators/CpuAdd.cpp",
+ "cpu/operators/CpuAddMulAdd.cpp",
"cpu/operators/CpuCast.cpp",
"cpu/operators/CpuConcatenate.cpp",
"cpu/operators/CpuConv2d.cpp",
@@ -867,6 +873,7 @@ filegroup(
"runtime/NEON/INESimpleFunction.cpp",
"runtime/NEON/INESimpleFunctionNoBorder.cpp",
"runtime/NEON/functions/NEActivationLayer.cpp",
+ "runtime/NEON/functions/NEAddMulAdd.cpp",
"runtime/NEON/functions/NEArgMinMaxLayer.cpp",
"runtime/NEON/functions/NEArithmeticAddition.cpp",
"runtime/NEON/functions/NEArithmeticSubtraction.cpp",
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 77f68056d4..c985dbcc31 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -645,6 +645,7 @@ target_sources(
cpu/CpuTensor.cpp
cpu/kernels/CpuActivationKernel.cpp
cpu/kernels/CpuAddKernel.cpp
+ cpu/kernels/CpuAddMulAddKernel.cpp
cpu/kernels/CpuCastKernel.cpp
cpu/kernels/CpuCol2ImKernel.cpp
cpu/kernels/CpuConcatenateBatchKernel.cpp
@@ -702,6 +703,10 @@ target_sources(
cpu/kernels/add/generic/neon/qasymm8.cpp
cpu/kernels/add/generic/neon/qasymm8_signed.cpp
cpu/kernels/add/generic/neon/qsymm16.cpp
+ cpu/kernels/addmuladd/generic/neon/fp16.cpp
+ cpu/kernels/addmuladd/generic/neon/fp32.cpp
+ cpu/kernels/addmuladd/generic/neon/qasymm8.cpp
+ cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp
cpu/kernels/boundingboxtransform/generic/neon/fp16.cpp
cpu/kernels/boundingboxtransform/generic/neon/fp32.cpp
cpu/kernels/boundingboxtransform/generic/neon/impl.cpp
@@ -802,6 +807,7 @@ target_sources(
cpu/kernels/sub/neon/qsymm16.cpp
cpu/operators/CpuActivation.cpp
cpu/operators/CpuAdd.cpp
+ cpu/operators/CpuAddMulAdd.cpp
cpu/operators/CpuCast.cpp
cpu/operators/CpuConcatenate.cpp
cpu/operators/CpuConv2d.cpp
@@ -859,6 +865,7 @@ target_sources(
runtime/NEON/INESimpleFunction.cpp
runtime/NEON/INESimpleFunctionNoBorder.cpp
runtime/NEON/functions/NEActivationLayer.cpp
+ runtime/NEON/functions/NEAddMulAdd.cpp
runtime/NEON/functions/NEArgMinMaxLayer.cpp
runtime/NEON/functions/NEArithmeticAddition.cpp
runtime/NEON/functions/NEArithmeticSubtraction.cpp
diff --git a/src/cpu/kernels/CpuAddMulAddKernel.cpp b/src/cpu/kernels/CpuAddMulAddKernel.cpp
new file mode 100644
index 0000000000..b84bdd54e9
--- /dev/null
+++ b/src/cpu/kernels/CpuAddMulAddKernel.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/CpuAddMulAddKernel.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+
+#include "src/core/CPP/Validate.h"
+#include "src/core/common/Registrars.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/cpu/kernels/addmuladd/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+static const std::vector<CpuAddMulAddKernel::AddMulAddKernel> available_kernels =
+{
+#ifdef __aarch64__
+ {
+ "neon_fp32_add_mul_add",
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
+ REGISTER_FP32_NEON(arm_compute::cpu::add_mul_add_fp32_neon)
+ },
+ {
+ "neon_fp16_add_mul_add",
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16); },
+ REGISTER_FP16_NEON(arm_compute::cpu::add_mul_add_fp16_neon)
+ },
+ {
+ "neon_qasymm8_add_mul_add",
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
+ REGISTER_QASYMM8_NEON(arm_compute::cpu::add_mul_add_u8_neon)
+ },
+ {
+ "neon_qasymm8_signed_add_mul_add",
+ [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
+ REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::add_mul_add_s8_neon)
+ }
+#endif // __aarch64__
+};
+
+Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, bn_mul, bn_add, final_output);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(policy != ConvertPolicy::SATURATE, "Only Saturate Policy is supported");
+
+ using ActFunction = ActivationLayerInfo::ActivationFunction;
+ const ActFunction act_func = act_info.activation();
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ (act_func != ActFunction::BOUNDED_RELU && act_func != ActFunction::RELU && act_func != ActFunction::LU_BOUNDED_RELU && act_func != ActFunction::IDENTITY),
+ "Only RELU Family activations, or no activation, is supported");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input1);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
+ DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, input2);
+
+ if(is_data_type_quantized(input1->data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bn_mul, 1, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bn_add, 1, DataType::F32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, bn_mul);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, bn_add);
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2); // No broadcasting
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(bn_mul, bn_add);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(bn_mul->num_dimensions() != 1, "BatchNorm coefficients should be a 1D array");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(bn_mul->tensor_shape()[0] != input1->tensor_shape()[0], "First dimensions of the inputs and the BatchNorm coefficients should match");
+
+ // Validate in case we have add layer's output (intermediate) initialized
+ if(add_output != nullptr && add_output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, add_output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, add_output);
+ }
+
+ // Validate in case final output has been initialized
+ if(final_output->total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input1, final_output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, final_output);
+ }
+
+ const auto uk = CpuAddMulAddKernel::get_implementation<DataTypeISASelectorData>(DataTypeISASelectorData{ input1->data_type(), CPUInfo::get().get_isa() });
+ ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
+
+ return Status{};
+}
+} // namespace
+
+void CpuAddMulAddKernel::configure(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ ITensorInfo *add_output, ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_UNUSED(bn_mul, bn_add, input2);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, bn_add, bn_mul, final_output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info));
+
+ const auto uk = CpuAddMulAddKernel::get_implementation<DataTypeISASelectorData>(DataTypeISASelectorData{ input1->data_type(), CPUInfo::get().get_isa() });
+ ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
+ ARM_COMPUTE_ERROR_ON(uk->ukernel == nullptr);
+
+ _policy = policy;
+ _act_info = act_info;
+ _run_method = uk->ukernel;
+ _name = std::string("CpuAddMulAddKernel/").append(uk->name);
+
+ // Auto initialize outputs if not initialized
+ set_shape_if_empty(*final_output, input1->tensor_shape());
+ set_data_type_if_unknown(*final_output, input1->data_type());
+
+ if(add_output != nullptr)
+ {
+ set_shape_if_empty(*add_output, input1->tensor_shape());
+ set_data_type_if_unknown(*add_output, input1->data_type());
+ }
+
+ // Configure kernel window
+ Window win;
+ win = calculate_max_window(*final_output, Steps());
+ ICpuKernel::configure(win);
+}
+
+Status CpuAddMulAddKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, bn_mul, bn_add, final_output);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info));
+
+ return Status{};
+}
+
+void CpuAddMulAddKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+
+ ARM_COMPUTE_ERROR_ON(tensors.empty());
+ ARM_COMPUTE_ERROR_ON(_run_method == nullptr);
+
+ const ITensor *input1 = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+ const ITensor *input2 = tensors.get_const_tensor(TensorType::ACL_SRC_1);
+ const ITensor *bn_mul = tensors.get_const_tensor(TensorType::ACL_SRC_2);
+ const ITensor *bn_add = tensors.get_const_tensor(TensorType::ACL_SRC_3);
+ ITensor *add_output = tensors.get_tensor(TensorType::ACL_DST_0);
+ ITensor *final_output = tensors.get_tensor(TensorType::ACL_DST_1);
+
+ _run_method(input1, input2, bn_mul, bn_add, add_output, final_output, _policy, _act_info, window);
+}
+
+const char *CpuAddMulAddKernel::name() const
+{
+ return _name.c_str();
+}
+
+const std::vector<CpuAddMulAddKernel::AddMulAddKernel> &CpuAddMulAddKernel::get_available_kernels()
+{
+ return available_kernels;
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
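As a sketch of what the checks in validate_arguments() accept on an AArch64 build (the shapes are illustrative and this snippet is not part of the patch): quantized inputs pair with F32 BatchNorm coefficients, the coefficient tensors must be 1D and match the first input dimension, and only the SATURATE policy and Relu-family (or identity) activations pass.

    // Hypothetical validation call mirroring the rules above (internal header, so only usable from inside the library).
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "src/cpu/kernels/CpuAddMulAddKernel.h"

    using namespace arm_compute;

    Status validation_sketch()
    {
        const TensorInfo in_q(TensorShape(16U, 8U), 1, DataType::QASYMM8);  // quantized inputs
        const TensorInfo coefs(TensorShape(16U), 1, DataType::F32);         // quantized path expects F32 coefficients
        const TensorInfo out_q(TensorShape(16U, 8U), 1, DataType::QASYMM8);

        // Passing nullptr for add_output is allowed: the intermediate result is then simply not validated.
        // A WRAP policy, a non Relu-family activation or 2D coefficients would turn this into an error status.
        return cpu::kernels::CpuAddMulAddKernel::validate(&in_q, &in_q, &coefs, &coefs,
                                                          nullptr, &out_q,
                                                          ConvertPolicy::SATURATE, ActivationLayerInfo());
    }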
diff --git a/src/cpu/kernels/CpuAddMulAddKernel.h b/src/cpu/kernels/CpuAddMulAddKernel.h
new file mode 100644
index 0000000000..595b580060
--- /dev/null
+++ b/src/cpu/kernels/CpuAddMulAddKernel.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SRC_CPU_KERNELS_CPUADDMULADDKERNEL
+#define SRC_CPU_KERNELS_CPUADDMULADDKERNEL
+
+#include "src/core/common/Macros.h"
+#include "src/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Interface for the kernel that performs the fused Add + Mul + Add operation with an optional Relu-family activation */
+class CpuAddMulAddKernel : public ICpuKernel<CpuAddMulAddKernel>
+{
+private:
+ using AddMulAddKernelPtr =
+ std::add_pointer<void(const ITensor *, const ITensor *, const ITensor *, const ITensor *, ITensor *, ITensor *, ConvertPolicy, const ActivationLayerInfo &, const Window &)>::type;
+
+public:
+ struct AddMulAddKernel
+ {
+ const char *name;
+ const DataTypeISASelectorPtr is_selected;
+ AddMulAddKernelPtr ukernel;
+ };
+
+ CpuAddMulAddKernel() = default;
+ ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuAddMulAddKernel);
+ /** Initialize the kernel's inputs and outputs.
+ *
+ * Similar to @ref NEAddMulAdd::configure()
+ *
+ */
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ ITensorInfo *add_output, ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to CpuAddMulAddKernel::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info);
+
+ // Inherited methods overridden:
+ void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+ const char *name() const override;
+
+ static const std::vector<AddMulAddKernel> &get_available_kernels();
+
+private:
+ ConvertPolicy _policy{};
+ ActivationLayerInfo _act_info{};
+ AddMulAddKernelPtr _run_method{ nullptr };
+ std::string _name{};
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* SRC_CPU_KERNELS_CPUADDMULADDKERNEL */
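The dispatch table declared above can also be inspected directly; a small sketch (again internal-only and purely illustrative) that lists the registered micro-kernels:

    #include <iostream>

    #include "src/cpu/kernels/CpuAddMulAddKernel.h"

    void list_add_mul_add_ukernels()
    {
        using arm_compute::cpu::kernels::CpuAddMulAddKernel;
        // On AArch64 this prints the four entries registered in CpuAddMulAddKernel.cpp,
        // e.g. "neon_fp32_add_mul_add"; on other architectures the table is empty.
        for (const auto &uk : CpuAddMulAddKernel::get_available_kernels())
        {
            std::cout << uk.name << "\n";
        }
    }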
diff --git a/src/cpu/kernels/addmuladd/generic/neon/fp16.cpp b/src/cpu/kernels/addmuladd/generic/neon/fp16.cpp
new file mode 100644
index 0000000000..2a7e602b79
--- /dev/null
+++ b/src/cpu/kernels/addmuladd/generic/neon/fp16.cpp
@@ -0,0 +1,949 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#if defined(__aarch64__) && defined(ENABLE_FP16_KERNELS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+namespace
+{
+void a64_add_bn_clamp_direct_fp16_2x32(
+ float16_t *out, size_t out_stride,
+ float16_t *out_direct, size_t out_direct_stride,
+ const float16_t *in0, size_t in0_stride,
+ const float16_t *in1, size_t in1_stride,
+ const float16_t *bn_mul,
+ const float16_t *bn_add,
+ const float16_t minval,
+ const float16_t maxval,
+ size_t width, size_t height)
+{
+ struct KernelArgs
+ {
+ float16_t minval;
+ float16_t maxval;
+ } ka;
+ ka.minval = minval;
+ ka.maxval = maxval;
+
+ __asm__ __volatile__(
+ "ldr w21, [%x[args_ptr], %[offsetof_minval]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_maxval]]\n"
+ "cmp %x[width], #0x20\n"
+ "dup v13.8h, w21\n"
+ "dup v12.8h, w20\n"
+ "blt 7f\n"
+ "1:" // Column loop
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x12, %x[in0]\n"
+ "mov x11, %x[in1]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x10, %x[out]\n"
+ "mov x9, %x[out_direct]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x20, %x[height]\n"
+ "mov x28, x12\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "mov x27, x11\n"
+ "mov x26, x10\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q10, [x27, #0x0]\n"
+ "mov x25, x9\n"
+ "add x24, x28, %x[in0_stride]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q8, [x27, #0x10]\n"
+ "add x23, x27, %x[in1_stride]\n"
+ "add x22, x26, %x[out_stride]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q6, [x27, #0x20]\n"
+ "add x21, x25, %x[out_direct_stride]\n"
+ "cmp x20, #0x2\n"
+ "ldr q5, [x28, #0x30]\n"
+ "ldr q4, [x27, #0x30]\n"
+ "add x12, x24, %x[in0_stride]\n"
+ "add x11, x23, %x[in1_stride]\n"
+ "add x10, x22, %x[out_stride]\n"
+ "add x9, x21, %x[out_direct_stride]\n"
+ "csel x24, x24, x28, GE\n"
+ "csel x23, x23, x27, GE\n"
+ "csel x22, x22, x26, GE\n"
+ "csel x21, x21, x25, GE\n"
+ "subs x20, x20, #0x2\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "add x28, x28, #0x40\n"
+ "add x27, x27, #0x40\n"
+ "ble 4f\n"
+ "2:" // Row loop
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "fadd v2.8h, v11.8h, v10.8h\n"
+ "fadd v1.8h, v9.8h, v8.8h\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q20, [x23, #0x10]\n"
+ "fadd v0.8h, v7.8h, v6.8h\n"
+ "fadd v23.8h, v5.8h, v4.8h\n"
+ "ldr q19, [x24, #0x20]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "fadd v22.8h, v3.8h, v22.8h\n"
+ "fadd v21.8h, v21.8h, v20.8h\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x30]\n"
+ "fadd v20.8h, v19.8h, v18.8h\n"
+ "fadd v19.8h, v17.8h, v16.8h\n"
+ "add x24, x24, #0x40\n"
+ "add x23, x23, #0x40\n"
+ "cbz %x[out_direct], 3f\n"
+ "str q2, [x25, #0x0]\n"
+ "str q1, [x25, #0x10]\n"
+ "str q0, [x25, #0x20]\n"
+ "str q23, [x25, #0x30]\n"
+ "add x25, x25, #0x40\n"
+ "str q22, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q20, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "add x21, x21, #0x40\n"
+ "3:" // Main loop: No direct output
+ "mov v16.16b, v2.16b\n"
+ "mov v2.16b, v28.16b\n"
+ "fmla v2.8h, v16.8h, v24.8h\n"
+ "mov x28, x12\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "mov v18.16b, v1.16b\n"
+ "mov v1.16b, v29.16b\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q5, [x28, #0x30]\n"
+ "mov v17.16b, v0.16b\n"
+ "mov v0.16b, v30.16b\n"
+ "mov v16.16b, v23.16b\n"
+ "mov v23.16b, v31.16b\n"
+ "fmla v1.8h, v18.8h, v25.8h\n"
+ "mov x27, x11\n"
+ "ldr q10, [x27, #0x0]\n"
+ "ldr q8, [x27, #0x10]\n"
+ "fmla v0.8h, v17.8h, v26.8h\n"
+ "fmla v23.8h, v16.8h, v27.8h\n"
+ "ldr q6, [x27, #0x20]\n"
+ "ldr q4, [x27, #0x30]\n"
+ "mov v17.16b, v22.16b\n"
+ "mov v22.16b, v28.16b\n"
+ "mov v16.16b, v21.16b\n"
+ "mov v21.16b, v29.16b\n"
+ "fmla v22.8h, v17.8h, v24.8h\n"
+ "mov x25, x9\n"
+ "mov v17.16b, v20.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "fmla v21.8h, v16.8h, v25.8h\n"
+ "add x24, x28, %x[in0_stride]\n"
+ "mov v16.16b, v19.16b\n"
+ "mov v19.16b, v31.16b\n"
+ "fmla v20.8h, v17.8h, v26.8h\n"
+ "add x23, x27, %x[in1_stride]\n"
+ "fmla v19.8h, v16.8h, v27.8h\n"
+ "fmin v2.8h, v2.8h, v12.8h\n"
+ "add x21, x25, %x[out_direct_stride]\n"
+ "cmp x20, #0x2\n"
+ "fmin v1.8h, v1.8h, v12.8h\n"
+ "fmin v0.8h, v0.8h, v12.8h\n"
+ "add x12, x24, %x[in0_stride]\n"
+ "add x11, x23, %x[in1_stride]\n"
+ "fmin v23.8h, v23.8h, v12.8h\n"
+ "fmax v2.8h, v2.8h, v13.8h\n"
+ "str q2, [x26, #0x0]\n"
+ "add x9, x21, %x[out_direct_stride]\n"
+ "fmax v1.8h, v1.8h, v13.8h\n"
+ "fmax v0.8h, v0.8h, v13.8h\n"
+ "str q1, [x26, #0x10]\n"
+ "csel x24, x24, x28, GE\n"
+ "fmax v23.8h, v23.8h, v13.8h\n"
+ "fmin v22.8h, v22.8h, v12.8h\n"
+ "str q0, [x26, #0x20]\n"
+ "csel x23, x23, x27, GE\n"
+ "fmin v21.8h, v21.8h, v12.8h\n"
+ "fmin v20.8h, v20.8h, v12.8h\n"
+ "str q23, [x26, #0x30]\n"
+ "mov x26, x10\n"
+ "fmin v19.8h, v19.8h, v12.8h\n"
+ "fmax v22.8h, v22.8h, v13.8h\n"
+ "str q22, [x22, #0x0]\n"
+ "csel x21, x21, x25, GE\n"
+ "fmax v21.8h, v21.8h, v13.8h\n"
+ "fmax v20.8h, v20.8h, v13.8h\n"
+ "str q21, [x22, #0x10]\n"
+ "add x28, x28, #0x40\n"
+ "fmax v19.8h, v19.8h, v13.8h\n"
+ "str q20, [x22, #0x20]\n"
+ "add x27, x27, #0x40\n"
+ "str q19, [x22, #0x30]\n"
+ "add x22, x26, %x[out_stride]\n"
+ "add x10, x22, %x[out_stride]\n"
+ "csel x22, x22, x26, GE\n"
+ "subs x20, x20, #0x2\n"
+ "bgt 2b\n"
+ "4:" // Row loop skip
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "fadd v2.8h, v11.8h, v10.8h\n"
+ "fadd v1.8h, v9.8h, v8.8h\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q20, [x23, #0x10]\n"
+ "fadd v0.8h, v7.8h, v6.8h\n"
+ "fadd v23.8h, v5.8h, v4.8h\n"
+ "ldr q19, [x24, #0x20]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "fadd v22.8h, v3.8h, v22.8h\n"
+ "fadd v21.8h, v21.8h, v20.8h\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x30]\n"
+ "fadd v20.8h, v19.8h, v18.8h\n"
+ "fadd v19.8h, v17.8h, v16.8h\n"
+ "add x24, x24, #0x40\n"
+ "add x23, x23, #0x40\n"
+ "cbz %x[out_direct], 5f\n"
+ "str q2, [x25, #0x0]\n"
+ "str q1, [x25, #0x10]\n"
+ "str q0, [x25, #0x20]\n"
+ "str q23, [x25, #0x30]\n"
+ "add x25, x25, #0x40\n"
+ "str q22, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q20, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "add x21, x21, #0x40\n"
+ "5:" // Tail loop: No direct output
+ "mov v16.16b, v2.16b\n"
+ "mov v2.16b, v28.16b\n"
+ "fmla v2.8h, v16.8h, v24.8h\n"
+ "add %x[in0], %x[in0], #0x40\n"
+ "mov v16.16b, v1.16b\n"
+ "mov v1.16b, v29.16b\n"
+ "fmla v1.8h, v16.8h, v25.8h\n"
+ "add %x[in1], %x[in1], #0x40\n"
+ "mov v16.16b, v0.16b\n"
+ "mov v0.16b, v30.16b\n"
+ "fmla v0.8h, v16.8h, v26.8h\n"
+ "add %x[out], %x[out], #0x40\n"
+ "mov v16.16b, v23.16b\n"
+ "mov v23.16b, v31.16b\n"
+ "fmla v23.8h, v16.8h, v27.8h\n"
+ "mov v16.16b, v22.16b\n"
+ "mov v22.16b, v28.16b\n"
+ "fmla v22.8h, v16.8h, v24.8h\n"
+ "mov v16.16b, v21.16b\n"
+ "mov v21.16b, v29.16b\n"
+ "fmla v21.8h, v16.8h, v25.8h\n"
+ "mov v16.16b, v20.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "fmla v20.8h, v16.8h, v26.8h\n"
+ "mov v16.16b, v19.16b\n"
+ "mov v19.16b, v31.16b\n"
+ "fmla v19.8h, v16.8h, v27.8h\n"
+ "fmin v2.8h, v2.8h, v12.8h\n"
+ "fmin v1.8h, v1.8h, v12.8h\n"
+ "fmin v0.8h, v0.8h, v12.8h\n"
+ "fmin v23.8h, v23.8h, v12.8h\n"
+ "fmin v22.8h, v22.8h, v12.8h\n"
+ "fmin v21.8h, v21.8h, v12.8h\n"
+ "fmin v20.8h, v20.8h, v12.8h\n"
+ "fmin v19.8h, v19.8h, v12.8h\n"
+ "fmax v2.8h, v2.8h, v13.8h\n"
+ "fmax v1.8h, v1.8h, v13.8h\n"
+ "str q2, [x26, #0x0]\n"
+ "fmax v0.8h, v0.8h, v13.8h\n"
+ "fmax v23.8h, v23.8h, v13.8h\n"
+ "str q1, [x26, #0x10]\n"
+ "fmax v22.8h, v22.8h, v13.8h\n"
+ "fmax v21.8h, v21.8h, v13.8h\n"
+ "str q0, [x26, #0x20]\n"
+ "fmax v20.8h, v20.8h, v13.8h\n"
+ "fmax v19.8h, v19.8h, v13.8h\n"
+ "str q23, [x26, #0x30]\n"
+ "add x26, x26, #0x40\n"
+ "str q22, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q20, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "add x22, x22, #0x40\n"
+ "cbz %x[out_direct], 6f\n"
+ "add %x[out_direct], %x[out_direct], #0x40\n"
+ "6:" // No direct pointer update
+ "sub %x[width], %x[width], #0x20\n"
+ "cmp %x[width], #0x20\n"
+ "bge 1b\n"
+ "cbz %x[width], 58f\n"
+ "7:" // main loop skip
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x20, %x[height]\n"
+ "mov x12, %x[in0]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x11, %x[in1]\n"
+ "mov x10, %x[out]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x9, %x[out_direct]\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "8:" // tail loop: Row loop
+ "mov x28, x12\n"
+ "mov x27, x11\n"
+ "mov x26, x10\n"
+ "mov x25, x9\n"
+ "add x24, x28, %x[in0_stride]\n"
+ "add x23, x27, %x[in1_stride]\n"
+ "add x22, x26, %x[out_stride]\n"
+ "add x21, x25, %x[out_direct_stride]\n"
+ "cmp x20, #0x2\n"
+ "add x12, x24, %x[in0_stride]\n"
+ "add x11, x23, %x[in1_stride]\n"
+ "add x10, x22, %x[out_stride]\n"
+ "add x9, x21, %x[out_direct_stride]\n"
+ "csel x24, x24, x28, GE\n"
+ "csel x23, x23, x27, GE\n"
+ "csel x22, x22, x26, GE\n"
+ "csel x21, x21, x25, GE\n"
+ "tbz %x[width], #4, 16f\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q10, [x27, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q8, [x27, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q20, [x23, #0x10]\n"
+ "add x24, x24, #0x20\n"
+ "add x23, x23, #0x20\n"
+ "tbz %x[width], #3, 12f\n"
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x27, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "add x27, x27, #0x10\n"
+ "ldr q19, [x24, #0x0]\n"
+ "ldr q18, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "tbz %x[width], #2, 10f\n"
+ "ldr d5, [x28], #0x8\n"
+ "ldr d4, [x27], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz %x[width], #1, 9f\n"
+ "ld1 { v5.s }[2], [x28], #0x4\n"
+ "ld1 { v4.s }[2], [x27], #0x4\n"
+ "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v5.h }[6], [x28], #0x2\n"
+ "ld1 { v4.h }[6], [x27], #0x2\n"
+ "ld1 { v17.h }[6], [x24], #0x2\n"
+ "ld1 { v16.h }[6], [x23], #0x2\n"
+ "b 24f\n"
+ "9:" // tail loop: unique 1: partial_0_28
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v5.h }[4], [x28], #0x2\n"
+ "ld1 { v4.h }[4], [x27], #0x2\n"
+ "ld1 { v17.h }[4], [x24], #0x2\n"
+ "ld1 { v16.h }[4], [x23], #0x2\n"
+ "b 24f\n"
+ "10:" // tail loop: unique 1: partial_1_24
+ "tbz %x[width], #1, 11f\n"
+ "ldr s5, [x28], #0x4\n"
+ "ldr s4, [x27], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v5.h }[2], [x28], #0x2\n"
+ "ld1 { v4.h }[2], [x27], #0x2\n"
+ "ld1 { v17.h }[2], [x24], #0x2\n"
+ "ld1 { v16.h }[2], [x23], #0x2\n"
+ "b 24f\n"
+ "11:" // tail loop: unique 1: partial_0_24
+ "tbz %x[width], #0, 24f\n"
+ "ldr h5, [x28], #0x2\n"
+ "ldr h4, [x27], #0x2\n"
+ "ldr h17, [x24], #0x2\n"
+ "ldr h16, [x23], #0x2\n"
+ "b 24f\n"
+ "12:" // tail loop: unique 1: partial_2_16
+ "tbz %x[width], #2, 14f\n"
+ "ldr d7, [x28], #0x8\n"
+ "ldr d6, [x27], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz %x[width], #1, 13f\n"
+ "ld1 { v7.s }[2], [x28], #0x4\n"
+ "ld1 { v6.s }[2], [x27], #0x4\n"
+ "ld1 { v19.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v7.h }[6], [x28], #0x2\n"
+ "ld1 { v6.h }[6], [x27], #0x2\n"
+ "ld1 { v19.h }[6], [x24], #0x2\n"
+ "ld1 { v18.h }[6], [x23], #0x2\n"
+ "b 24f\n"
+ "13:" // tail loop: unique 1: partial_0_20
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v7.h }[4], [x28], #0x2\n"
+ "ld1 { v6.h }[4], [x27], #0x2\n"
+ "ld1 { v19.h }[4], [x24], #0x2\n"
+ "ld1 { v18.h }[4], [x23], #0x2\n"
+ "b 24f\n"
+ "14:" // tail loop: unique 1: partial_1_16
+ "tbz %x[width], #1, 15f\n"
+ "ldr s7, [x28], #0x4\n"
+ "ldr s6, [x27], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "ldr s18, [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v7.h }[2], [x28], #0x2\n"
+ "ld1 { v6.h }[2], [x27], #0x2\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v18.h }[2], [x23], #0x2\n"
+ "b 24f\n"
+ "15:" // tail loop: unique 1: partial_0_16
+ "tbz %x[width], #0, 24f\n"
+ "ldr h7, [x28], #0x2\n"
+ "ldr h6, [x27], #0x2\n"
+ "ldr h19, [x24], #0x2\n"
+ "ldr h18, [x23], #0x2\n"
+ "b 24f\n"
+ "16:" // tail loop: unique 1: partial_3_0
+ "tbz %x[width], #3, 20f\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q10, [x27, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "add x27, x27, #0x10\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "tbz %x[width], #2, 18f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d8, [x27], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d20, [x23], #0x8\n"
+ "tbz %x[width], #1, 17f\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "ld1 { v8.s }[2], [x27], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v20.s }[2], [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v9.h }[6], [x28], #0x2\n"
+ "ld1 { v8.h }[6], [x27], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v20.h }[6], [x23], #0x2\n"
+ "b 24f\n"
+ "17:" // tail loop: unique 1: partial_0_12
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v9.h }[4], [x28], #0x2\n"
+ "ld1 { v8.h }[4], [x27], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v20.h }[4], [x23], #0x2\n"
+ "b 24f\n"
+ "18:" // tail loop: unique 1: partial_1_8
+ "tbz %x[width], #1, 19f\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s8, [x27], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s20, [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v9.h }[2], [x28], #0x2\n"
+ "ld1 { v8.h }[2], [x27], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v20.h }[2], [x23], #0x2\n"
+ "b 24f\n"
+ "19:" // tail loop: unique 1: partial_0_8
+ "tbz %x[width], #0, 24f\n"
+ "ldr h9, [x28], #0x2\n"
+ "ldr h8, [x27], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h20, [x23], #0x2\n"
+ "b 24f\n"
+ "20:" // tail loop: unique 1: partial_2_0
+ "tbz %x[width], #2, 22f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d10, [x27], #0x8\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "tbz %x[width], #1, 21f\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v10.s }[2], [x27], #0x4\n"
+ "ld1 { v3.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v11.h }[6], [x28], #0x2\n"
+ "ld1 { v10.h }[6], [x27], #0x2\n"
+ "ld1 { v3.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "b 24f\n"
+ "21:" // tail loop: unique 1: partial_0_4
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v11.h }[4], [x28], #0x2\n"
+ "ld1 { v10.h }[4], [x27], #0x2\n"
+ "ld1 { v3.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "b 24f\n"
+ "22:" // tail loop: unique 1: partial_1_0
+ "tbz %x[width], #1, 23f\n"
+ "ldr s11, [x28], #0x4\n"
+ "ldr s10, [x27], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "tbz %x[width], #0, 24f\n"
+ "ld1 { v11.h }[2], [x28], #0x2\n"
+ "ld1 { v10.h }[2], [x27], #0x2\n"
+ "ld1 { v3.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "b 24f\n"
+ "23:" // tail loop: unique 1: partial_0_0
+ "ldr h11, [x28], #0x2\n"
+ "ldr h10, [x27], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "24:" // tail loop: unique 1: Done
+ "fadd v2.8h, v11.8h, v10.8h\n"
+ "fadd v1.8h, v9.8h, v8.8h\n"
+ "fadd v0.8h, v7.8h, v6.8h\n"
+ "fadd v23.8h, v5.8h, v4.8h\n"
+ "fadd v22.8h, v3.8h, v22.8h\n"
+ "fadd v21.8h, v21.8h, v20.8h\n"
+ "fadd v20.8h, v19.8h, v18.8h\n"
+ "fadd v19.8h, v17.8h, v16.8h\n"
+ "cbz %x[out_direct], 41f\n"
+ "tbz %x[width], #4, 32f\n"
+ "str q2, [x25, #0x0]\n"
+ "str q1, [x25, #0x10]\n"
+ "add x25, x25, #0x20\n"
+ "str q22, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "add x21, x21, #0x20\n"
+ "tbz %x[width], #3, 28f\n"
+ "str q0, [x25, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "add x21, x21, #0x10\n"
+ "tbz %x[width], #2, 26f\n"
+ "str d23, [x25], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "tbz %x[width], #1, 25f\n"
+ "st1 { v23.s }[2], [x25], #0x4\n"
+ "st1 { v19.s }[2], [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v23.h }[6], [x25], #0x2\n"
+ "st1 { v19.h }[6], [x21], #0x2\n"
+ "b 40f\n"
+ "25:" // tail loop: Main loop: unique 2: partial_0_28
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v23.h }[4], [x25], #0x2\n"
+ "st1 { v19.h }[4], [x21], #0x2\n"
+ "b 40f\n"
+ "26:" // tail loop: Main loop: unique 2: partial_1_24
+ "tbz %x[width], #1, 27f\n"
+ "str s23, [x25], #0x4\n"
+ "str s19, [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v23.h }[2], [x25], #0x2\n"
+ "st1 { v19.h }[2], [x21], #0x2\n"
+ "b 40f\n"
+ "27:" // tail loop: Main loop: unique 2: partial_0_24
+ "tbz %x[width], #0, 40f\n"
+ "str h23, [x25], #0x2\n"
+ "str h19, [x21], #0x2\n"
+ "b 40f\n"
+ "28:" // tail loop: Main loop: unique 2: partial_2_16
+ "tbz %x[width], #2, 30f\n"
+ "str d0, [x25], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz %x[width], #1, 29f\n"
+ "st1 { v0.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v0.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x21], #0x2\n"
+ "b 40f\n"
+ "29:" // tail loop: Main loop: unique 2: partial_0_20
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v0.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x21], #0x2\n"
+ "b 40f\n"
+ "30:" // tail loop: Main loop: unique 2: partial_1_16
+ "tbz %x[width], #1, 31f\n"
+ "str s0, [x25], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v0.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x21], #0x2\n"
+ "b 40f\n"
+ "31:" // tail loop: Main loop: unique 2: partial_0_16
+ "tbz %x[width], #0, 40f\n"
+ "str h0, [x25], #0x2\n"
+ "str h20, [x21], #0x2\n"
+ "b 40f\n"
+ "32:" // tail loop: Main loop: unique 2: partial_3_0
+ "tbz %x[width], #3, 36f\n"
+ "str q2, [x25, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "str q22, [x21, #0x0]\n"
+ "add x21, x21, #0x10\n"
+ "tbz %x[width], #2, 34f\n"
+ "str d1, [x25], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz %x[width], #1, 33f\n"
+ "st1 { v1.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v1.h }[6], [x25], #0x2\n"
+ "st1 { v21.h }[6], [x21], #0x2\n"
+ "b 40f\n"
+ "33:" // tail loop: Main loop: unique 2: partial_0_12
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v1.h }[4], [x25], #0x2\n"
+ "st1 { v21.h }[4], [x21], #0x2\n"
+ "b 40f\n"
+ "34:" // tail loop: Main loop: unique 2: partial_1_8
+ "tbz %x[width], #1, 35f\n"
+ "str s1, [x25], #0x4\n"
+ "str s21, [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v1.h }[2], [x25], #0x2\n"
+ "st1 { v21.h }[2], [x21], #0x2\n"
+ "b 40f\n"
+ "35:" // tail loop: Main loop: unique 2: partial_0_8
+ "tbz %x[width], #0, 40f\n"
+ "str h1, [x25], #0x2\n"
+ "str h21, [x21], #0x2\n"
+ "b 40f\n"
+ "36:" // tail loop: Main loop: unique 2: partial_2_0
+ "tbz %x[width], #2, 38f\n"
+ "str d2, [x25], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz %x[width], #1, 37f\n"
+ "st1 { v2.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v2.h }[6], [x25], #0x2\n"
+ "st1 { v22.h }[6], [x21], #0x2\n"
+ "b 40f\n"
+ "37:" // tail loop: Main loop: unique 2: partial_0_4
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v2.h }[4], [x25], #0x2\n"
+ "st1 { v22.h }[4], [x21], #0x2\n"
+ "b 40f\n"
+ "38:" // tail loop: Main loop: unique 2: partial_1_0
+ "tbz %x[width], #1, 39f\n"
+ "str s2, [x25], #0x4\n"
+ "str s22, [x21], #0x4\n"
+ "tbz %x[width], #0, 40f\n"
+ "st1 { v2.h }[2], [x25], #0x2\n"
+ "st1 { v22.h }[2], [x21], #0x2\n"
+ "b 40f\n"
+ "39:" // tail loop: Main loop: unique 2: partial_0_0
+ "str h2, [x25], #0x2\n"
+ "str h22, [x21], #0x2\n"
+ "40:" // tail loop: Main loop: unique 2: Done
+ "41:" // tail loop: Main loop: No direct output
+ "mov v16.16b, v2.16b\n"
+ "mov v2.16b, v28.16b\n"
+ "fmla v2.8h, v16.8h, v24.8h\n"
+ "mov v16.16b, v1.16b\n"
+ "mov v1.16b, v29.16b\n"
+ "fmla v1.8h, v16.8h, v25.8h\n"
+ "mov v16.16b, v0.16b\n"
+ "mov v0.16b, v30.16b\n"
+ "fmla v0.8h, v16.8h, v26.8h\n"
+ "mov v16.16b, v23.16b\n"
+ "mov v23.16b, v31.16b\n"
+ "fmla v23.8h, v16.8h, v27.8h\n"
+ "mov v16.16b, v22.16b\n"
+ "mov v22.16b, v28.16b\n"
+ "fmla v22.8h, v16.8h, v24.8h\n"
+ "mov v16.16b, v21.16b\n"
+ "mov v21.16b, v29.16b\n"
+ "fmla v21.8h, v16.8h, v25.8h\n"
+ "mov v16.16b, v20.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "fmla v20.8h, v16.8h, v26.8h\n"
+ "mov v16.16b, v19.16b\n"
+ "mov v19.16b, v31.16b\n"
+ "fmla v19.8h, v16.8h, v27.8h\n"
+ "fmin v2.8h, v2.8h, v12.8h\n"
+ "fmin v1.8h, v1.8h, v12.8h\n"
+ "fmin v0.8h, v0.8h, v12.8h\n"
+ "fmin v23.8h, v23.8h, v12.8h\n"
+ "fmin v22.8h, v22.8h, v12.8h\n"
+ "fmin v21.8h, v21.8h, v12.8h\n"
+ "fmin v20.8h, v20.8h, v12.8h\n"
+ "fmin v19.8h, v19.8h, v12.8h\n"
+ "fmax v2.8h, v2.8h, v13.8h\n"
+ "fmax v1.8h, v1.8h, v13.8h\n"
+ "fmax v0.8h, v0.8h, v13.8h\n"
+ "fmax v23.8h, v23.8h, v13.8h\n"
+ "fmax v22.8h, v22.8h, v13.8h\n"
+ "fmax v21.8h, v21.8h, v13.8h\n"
+ "fmax v20.8h, v20.8h, v13.8h\n"
+ "fmax v19.8h, v19.8h, v13.8h\n"
+ "tbz %x[width], #4, 49f\n"
+ "str q2, [x26, #0x0]\n"
+ "str q1, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
+ "str q22, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "add x22, x22, #0x20\n"
+ "tbz %x[width], #3, 45f\n"
+ "str q0, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x22, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "tbz %x[width], #2, 43f\n"
+ "str d23, [x26], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz %x[width], #1, 42f\n"
+ "st1 { v23.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v23.h }[6], [x26], #0x2\n"
+ "st1 { v19.h }[6], [x22], #0x2\n"
+ "b 57f\n"
+ "42:" // tail loop: unique 3: partial_0_28
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v23.h }[4], [x26], #0x2\n"
+ "st1 { v19.h }[4], [x22], #0x2\n"
+ "b 57f\n"
+ "43:" // tail loop: unique 3: partial_1_24
+ "tbz %x[width], #1, 44f\n"
+ "str s23, [x26], #0x4\n"
+ "str s19, [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v23.h }[2], [x26], #0x2\n"
+ "st1 { v19.h }[2], [x22], #0x2\n"
+ "b 57f\n"
+ "44:" // tail loop: unique 3: partial_0_24
+ "tbz %x[width], #0, 57f\n"
+ "str h23, [x26], #0x2\n"
+ "str h19, [x22], #0x2\n"
+ "b 57f\n"
+ "45:" // tail loop: unique 3: partial_2_16
+ "tbz %x[width], #2, 47f\n"
+ "str d0, [x26], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz %x[width], #1, 46f\n"
+ "st1 { v0.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v0.h }[6], [x26], #0x2\n"
+ "st1 { v20.h }[6], [x22], #0x2\n"
+ "b 57f\n"
+ "46:" // tail loop: unique 3: partial_0_20
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v0.h }[4], [x26], #0x2\n"
+ "st1 { v20.h }[4], [x22], #0x2\n"
+ "b 57f\n"
+ "47:" // tail loop: unique 3: partial_1_16
+ "tbz %x[width], #1, 48f\n"
+ "str s0, [x26], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v0.h }[2], [x26], #0x2\n"
+ "st1 { v20.h }[2], [x22], #0x2\n"
+ "b 57f\n"
+ "48:" // tail loop: unique 3: partial_0_16
+ "tbz %x[width], #0, 57f\n"
+ "str h0, [x26], #0x2\n"
+ "str h20, [x22], #0x2\n"
+ "b 57f\n"
+ "49:" // tail loop: unique 3: partial_3_0
+ "tbz %x[width], #3, 53f\n"
+ "str q2, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q22, [x22, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "tbz %x[width], #2, 51f\n"
+ "str d1, [x26], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz %x[width], #1, 50f\n"
+ "st1 { v1.s }[2], [x26], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v1.h }[6], [x26], #0x2\n"
+ "st1 { v21.h }[6], [x22], #0x2\n"
+ "b 57f\n"
+ "50:" // tail loop: unique 3: partial_0_12
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v1.h }[4], [x26], #0x2\n"
+ "st1 { v21.h }[4], [x22], #0x2\n"
+ "b 57f\n"
+ "51:" // tail loop: unique 3: partial_1_8
+ "tbz %x[width], #1, 52f\n"
+ "str s1, [x26], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v1.h }[2], [x26], #0x2\n"
+ "st1 { v21.h }[2], [x22], #0x2\n"
+ "b 57f\n"
+ "52:" // tail loop: unique 3: partial_0_8
+ "tbz %x[width], #0, 57f\n"
+ "str h1, [x26], #0x2\n"
+ "str h21, [x22], #0x2\n"
+ "b 57f\n"
+ "53:" // tail loop: unique 3: partial_2_0
+ "tbz %x[width], #2, 55f\n"
+ "str d2, [x26], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz %x[width], #1, 54f\n"
+ "st1 { v2.s }[2], [x26], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v2.h }[6], [x26], #0x2\n"
+ "st1 { v22.h }[6], [x22], #0x2\n"
+ "b 57f\n"
+ "54:" // tail loop: unique 3: partial_0_4
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v2.h }[4], [x26], #0x2\n"
+ "st1 { v22.h }[4], [x22], #0x2\n"
+ "b 57f\n"
+ "55:" // tail loop: unique 3: partial_1_0
+ "tbz %x[width], #1, 56f\n"
+ "str s2, [x26], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "tbz %x[width], #0, 57f\n"
+ "st1 { v2.h }[2], [x26], #0x2\n"
+ "st1 { v22.h }[2], [x22], #0x2\n"
+ "b 57f\n"
+ "56:" // tail loop: unique 3: partial_0_0
+ "str h2, [x26], #0x2\n"
+ "str h22, [x22], #0x2\n"
+ "57:" // tail loop: unique 3: Done
+ "subs x20, x20, #0x2\n"
+ "bgt 8b\n"
+ "58:" // odd columns skip
+ : [bn_add] "+&r"(bn_add), [bn_mul] "+&r"(bn_mul), [in0] "+&r"(in0), [in1] "+&r"(in1), [out] "+&r"(out), [out_direct] "+&r"(out_direct), [width] "+&r"(width)
+ : [args_ptr] "r"(&ka), [height] "r"(height), [in0_stride] "r"(in0_stride), [in1_stride] "r"(in1_stride), [offsetof_maxval] "I"(offsetof(KernelArgs, maxval)), [offsetof_minval] "I"(offsetof(KernelArgs, minval)), [out_direct_stride] "r"(out_direct_stride), [out_stride] "r"(out_stride)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
+}
+
+} // namespace
+
+namespace arm_compute
+{
+namespace cpu
+{
+void add_mul_add_fp16_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add,
+ ITensor *add_output, ITensor *final_output, ConvertPolicy policy, const ActivationLayerInfo &act_info, const Window &window)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ const size_t out_stride = final_output->info()->strides_in_bytes()[1];
+ const size_t out_direct_stride = (add_output != nullptr) ? add_output->info()->strides_in_bytes()[1] : 0;
+ const size_t in0_stride = input1->info()->strides_in_bytes()[1];
+ const size_t in1_stride = input2->info()->strides_in_bytes()[1];
+
+ float16_t minval = std::numeric_limits<half>::lowest();
+ float16_t maxval = std::numeric_limits<half>::max();
+
+ if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+ {
+ minval = static_cast<float16_t>(0.f);
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+ {
+ minval = static_cast<float16_t>(0.f);
+ maxval = static_cast<float16_t>(act_info.a());
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ minval = static_cast<float16_t>(act_info.b());
+ maxval = static_cast<float16_t>(act_info.a());
+ }
+
+ // Clear X & Y dimensions on execution window as we handle manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator in1_it(input1, window);
+ Iterator in2_it(input2, window);
+ Iterator out_it(final_output, window);
+
+ const size_t width = window.num_iterations(0);
+ const size_t height = window.num_iterations(1);
+
+ if(add_output != nullptr)
+ {
+ Iterator add_out_it(add_output, window);
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_fp16_2x32(
+ reinterpret_cast<float16_t *>(out_it.ptr()), out_stride,
+ reinterpret_cast<float16_t *>(add_out_it.ptr()), out_direct_stride,
+ reinterpret_cast<float16_t *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<float16_t *>(in2_it.ptr()), in1_stride,
+ reinterpret_cast<float16_t *>(bn_mul->buffer()),
+ reinterpret_cast<float16_t *>(bn_add->buffer()),
+ minval,
+ maxval,
+ width, height);
+ },
+ in1_it, in2_it, add_out_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_fp16_2x32(
+ reinterpret_cast<float16_t *>(out_it.ptr()), out_stride,
+ nullptr, out_direct_stride,
+ reinterpret_cast<float16_t *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<float16_t *>(in2_it.ptr()), in1_stride,
+ reinterpret_cast<float16_t *>(bn_mul->buffer()),
+ reinterpret_cast<float16_t *>(bn_add->buffer()),
+ minval,
+ maxval,
+ width, height);
+ },
+ in1_it, in2_it, out_it);
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // defined(__aarch64__) && defined(ENABLE_FP16_KERNELS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
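The wrapper above folds the activation into the [minval, maxval] clamp handed to the assembly kernel; a small sketch of that mapping on a single value (the helper name is illustrative):

    // Sketch: how the Relu-family activations reduce to a clamp, as in add_mul_add_fp16_neon() above.
    #include <algorithm>
    #include <limits>

    float clamp_like_kernel(float value, float minval, float maxval)
    {
        return std::min(std::max(value, minval), maxval);
    }

    // IDENTITY        : minval = lowest, maxval = max  -> value passes through unchanged
    // RELU            : minval = 0,      maxval = max  -> clamp_like_kernel(-1.5f, 0.f, max) == 0.f
    // BOUNDED_RELU(a) : minval = 0,      maxval = a    -> with a = 6, clamp_like_kernel(7.2f, 0.f, 6.f) == 6.f
    // LU_BOUNDED_RELU : minval = b,      maxval = a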
diff --git a/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp b/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp
new file mode 100644
index 0000000000..a102682f8b
--- /dev/null
+++ b/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#ifdef __aarch64__
+namespace
+{
+void a64_add_bn_clamp_direct_fp32_2x16(
+ float *out, size_t out_stride,
+ float *out_direct, size_t out_direct_stride,
+ const float *in0, size_t in0_stride,
+ const float *in1, size_t in1_stride,
+ const float *bn_mul,
+ const float *bn_add,
+ const float minval,
+ const float maxval,
+ size_t width, size_t height)
+{
+ struct KernelArgs
+ {
+ float minval;
+ float maxval;
+ } ka;
+ ka.minval = minval;
+ ka.maxval = maxval;
+
+ __asm__ __volatile__(
+ "ldr w21, [%x[args_ptr], %[offsetof_minval]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_maxval]]\n"
+ "cmp %x[width], #0x10\n"
+ "dup v13.4s, w21\n"
+ "dup v12.4s, w20\n"
+ "blt 7f\n"
+ "1:" // Column loop
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x12, %x[in0]\n"
+ "mov x11, %x[in1]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x10, %x[out]\n"
+ "mov x9, %x[out_direct]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x20, %x[height]\n"
+ "mov x28, x12\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "mov x27, x11\n"
+ "mov x26, x10\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q10, [x27, #0x0]\n"
+ "mov x25, x9\n"
+ "add x24, x28, %x[in0_stride]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q8, [x27, #0x10]\n"
+ "add x23, x27, %x[in1_stride]\n"
+ "add x22, x26, %x[out_stride]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q6, [x27, #0x20]\n"
+ "add x21, x25, %x[out_direct_stride]\n"
+ "cmp x20, #0x2\n"
+ "ldr q5, [x28, #0x30]\n"
+ "ldr q4, [x27, #0x30]\n"
+ "add x12, x24, %x[in0_stride]\n"
+ "add x11, x23, %x[in1_stride]\n"
+ "add x10, x22, %x[out_stride]\n"
+ "add x9, x21, %x[out_direct_stride]\n"
+ "csel x24, x24, x28, GE\n"
+ "csel x23, x23, x27, GE\n"
+ "csel x22, x22, x26, GE\n"
+ "csel x21, x21, x25, GE\n"
+ "subs x20, x20, #0x2\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "add x28, x28, #0x40\n"
+ "add x27, x27, #0x40\n"
+ "ble 4f\n"
+ "2:" // Row loop
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "fadd v2.4s, v11.4s, v10.4s\n"
+ "fadd v1.4s, v9.4s, v8.4s\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q20, [x23, #0x10]\n"
+ "fadd v0.4s, v7.4s, v6.4s\n"
+ "fadd v23.4s, v5.4s, v4.4s\n"
+ "ldr q19, [x24, #0x20]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "fadd v22.4s, v3.4s, v22.4s\n"
+ "fadd v21.4s, v21.4s, v20.4s\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x30]\n"
+ "fadd v20.4s, v19.4s, v18.4s\n"
+ "fadd v19.4s, v17.4s, v16.4s\n"
+ "add x24, x24, #0x40\n"
+ "add x23, x23, #0x40\n"
+ "cbz %x[out_direct], 3f\n"
+ "str q2, [x25, #0x0]\n"
+ "str q1, [x25, #0x10]\n"
+ "str q0, [x25, #0x20]\n"
+ "str q23, [x25, #0x30]\n"
+ "add x25, x25, #0x40\n"
+ "str q22, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q20, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "add x21, x21, #0x40\n"
+ "3:" // Main loop: No direct output
+ "mov v16.16b, v2.16b\n"
+ "mov v2.16b, v28.16b\n"
+ "fmla v2.4s, v16.4s, v24.4s\n"
+ "mov x28, x12\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "mov v18.16b, v1.16b\n"
+ "mov v1.16b, v29.16b\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q5, [x28, #0x30]\n"
+ "mov v17.16b, v0.16b\n"
+ "mov v0.16b, v30.16b\n"
+ "mov v16.16b, v23.16b\n"
+ "mov v23.16b, v31.16b\n"
+ "fmla v1.4s, v18.4s, v25.4s\n"
+ "mov x27, x11\n"
+ "ldr q10, [x27, #0x0]\n"
+ "ldr q8, [x27, #0x10]\n"
+ "fmla v0.4s, v17.4s, v26.4s\n"
+ "fmla v23.4s, v16.4s, v27.4s\n"
+ "ldr q6, [x27, #0x20]\n"
+ "ldr q4, [x27, #0x30]\n"
+ "mov v17.16b, v22.16b\n"
+ "mov v22.16b, v28.16b\n"
+ "mov v16.16b, v21.16b\n"
+ "mov v21.16b, v29.16b\n"
+ "fmla v22.4s, v17.4s, v24.4s\n"
+ "mov x25, x9\n"
+ "mov v17.16b, v20.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "fmla v21.4s, v16.4s, v25.4s\n"
+ "add x24, x28, %x[in0_stride]\n"
+ "mov v16.16b, v19.16b\n"
+ "mov v19.16b, v31.16b\n"
+ "fmla v20.4s, v17.4s, v26.4s\n"
+ "add x23, x27, %x[in1_stride]\n"
+ "fmla v19.4s, v16.4s, v27.4s\n"
+ "fmin v2.4s, v2.4s, v12.4s\n"
+ "add x21, x25, %x[out_direct_stride]\n"
+ "cmp x20, #0x2\n"
+ "fmin v1.4s, v1.4s, v12.4s\n"
+ "fmin v0.4s, v0.4s, v12.4s\n"
+ "add x12, x24, %x[in0_stride]\n"
+ "add x11, x23, %x[in1_stride]\n"
+ "fmin v23.4s, v23.4s, v12.4s\n"
+ "fmax v2.4s, v2.4s, v13.4s\n"
+ "str q2, [x26, #0x0]\n"
+ "add x9, x21, %x[out_direct_stride]\n"
+ "fmax v1.4s, v1.4s, v13.4s\n"
+ "fmax v0.4s, v0.4s, v13.4s\n"
+ "str q1, [x26, #0x10]\n"
+ "csel x24, x24, x28, GE\n"
+ "fmax v23.4s, v23.4s, v13.4s\n"
+ "fmin v22.4s, v22.4s, v12.4s\n"
+ "str q0, [x26, #0x20]\n"
+ "csel x23, x23, x27, GE\n"
+ "fmin v21.4s, v21.4s, v12.4s\n"
+ "fmin v20.4s, v20.4s, v12.4s\n"
+ "str q23, [x26, #0x30]\n"
+ "mov x26, x10\n"
+ "fmin v19.4s, v19.4s, v12.4s\n"
+ "fmax v22.4s, v22.4s, v13.4s\n"
+ "str q22, [x22, #0x0]\n"
+ "csel x21, x21, x25, GE\n"
+ "fmax v21.4s, v21.4s, v13.4s\n"
+ "fmax v20.4s, v20.4s, v13.4s\n"
+ "str q21, [x22, #0x10]\n"
+ "add x28, x28, #0x40\n"
+ "fmax v19.4s, v19.4s, v13.4s\n"
+ "str q20, [x22, #0x20]\n"
+ "add x27, x27, #0x40\n"
+ "str q19, [x22, #0x30]\n"
+ "add x22, x26, %x[out_stride]\n"
+ "add x10, x22, %x[out_stride]\n"
+ "csel x22, x22, x26, GE\n"
+ "subs x20, x20, #0x2\n"
+ "bgt 2b\n"
+ "4:" // Row loop skip
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "fadd v2.4s, v11.4s, v10.4s\n"
+ "fadd v1.4s, v9.4s, v8.4s\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q20, [x23, #0x10]\n"
+ "fadd v0.4s, v7.4s, v6.4s\n"
+ "fadd v23.4s, v5.4s, v4.4s\n"
+ "ldr q19, [x24, #0x20]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "fadd v22.4s, v3.4s, v22.4s\n"
+ "fadd v21.4s, v21.4s, v20.4s\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x30]\n"
+ "fadd v20.4s, v19.4s, v18.4s\n"
+ "fadd v19.4s, v17.4s, v16.4s\n"
+ "add x24, x24, #0x40\n"
+ "add x23, x23, #0x40\n"
+ "cbz %x[out_direct], 5f\n"
+ "str q2, [x25, #0x0]\n"
+ "str q1, [x25, #0x10]\n"
+ "str q0, [x25, #0x20]\n"
+ "str q23, [x25, #0x30]\n"
+ "add x25, x25, #0x40\n"
+ "str q22, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "str q20, [x21, #0x20]\n"
+ "str q19, [x21, #0x30]\n"
+ "add x21, x21, #0x40\n"
+ "5:" // Tail loop: No direct output
+ "mov v16.16b, v2.16b\n"
+ "mov v2.16b, v28.16b\n"
+ "fmla v2.4s, v16.4s, v24.4s\n"
+ "add %x[in0], %x[in0], #0x40\n"
+ "mov v16.16b, v1.16b\n"
+ "mov v1.16b, v29.16b\n"
+ "fmla v1.4s, v16.4s, v25.4s\n"
+ "add %x[in1], %x[in1], #0x40\n"
+ "mov v16.16b, v0.16b\n"
+ "mov v0.16b, v30.16b\n"
+ "fmla v0.4s, v16.4s, v26.4s\n"
+ "add %x[out], %x[out], #0x40\n"
+ "mov v16.16b, v23.16b\n"
+ "mov v23.16b, v31.16b\n"
+ "fmla v23.4s, v16.4s, v27.4s\n"
+ "mov v16.16b, v22.16b\n"
+ "mov v22.16b, v28.16b\n"
+ "fmla v22.4s, v16.4s, v24.4s\n"
+ "mov v16.16b, v21.16b\n"
+ "mov v21.16b, v29.16b\n"
+ "fmla v21.4s, v16.4s, v25.4s\n"
+ "mov v16.16b, v20.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "fmla v20.4s, v16.4s, v26.4s\n"
+ "mov v16.16b, v19.16b\n"
+ "mov v19.16b, v31.16b\n"
+ "fmla v19.4s, v16.4s, v27.4s\n"
+ "fmin v2.4s, v2.4s, v12.4s\n"
+ "fmin v1.4s, v1.4s, v12.4s\n"
+ "fmin v0.4s, v0.4s, v12.4s\n"
+ "fmin v23.4s, v23.4s, v12.4s\n"
+ "fmin v22.4s, v22.4s, v12.4s\n"
+ "fmin v21.4s, v21.4s, v12.4s\n"
+ "fmin v20.4s, v20.4s, v12.4s\n"
+ "fmin v19.4s, v19.4s, v12.4s\n"
+ "fmax v2.4s, v2.4s, v13.4s\n"
+ "fmax v1.4s, v1.4s, v13.4s\n"
+ "str q2, [x26, #0x0]\n"
+ "fmax v0.4s, v0.4s, v13.4s\n"
+ "fmax v23.4s, v23.4s, v13.4s\n"
+ "str q1, [x26, #0x10]\n"
+ "fmax v22.4s, v22.4s, v13.4s\n"
+ "fmax v21.4s, v21.4s, v13.4s\n"
+ "str q0, [x26, #0x20]\n"
+ "fmax v20.4s, v20.4s, v13.4s\n"
+ "fmax v19.4s, v19.4s, v13.4s\n"
+ "str q23, [x26, #0x30]\n"
+ "add x26, x26, #0x40\n"
+ "str q22, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q20, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "add x22, x22, #0x40\n"
+ "cbz %x[out_direct], 6f\n"
+ "add %x[out_direct], %x[out_direct], #0x40\n"
+ "6:" // No direct pointer update
+ "sub %x[width], %x[width], #0x10\n"
+ "cmp %x[width], #0x10\n"
+ "bge 1b\n"
+ "cbz %x[width], 34f\n"
+ "7:" // main loop skip
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x20, %x[height]\n"
+ "mov x12, %x[in0]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x11, %x[in1]\n"
+ "mov x10, %x[out]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x9, %x[out_direct]\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "8:" // tail loop: Row loop
+ "mov x28, x12\n"
+ "mov x27, x11\n"
+ "mov x26, x10\n"
+ "mov x25, x9\n"
+ "add x24, x28, %x[in0_stride]\n"
+ "add x23, x27, %x[in1_stride]\n"
+ "add x22, x26, %x[out_stride]\n"
+ "add x21, x25, %x[out_direct_stride]\n"
+ "cmp x20, #0x2\n"
+ "add x12, x24, %x[in0_stride]\n"
+ "add x11, x23, %x[in1_stride]\n"
+ "add x10, x22, %x[out_stride]\n"
+ "add x9, x21, %x[out_direct_stride]\n"
+ "csel x24, x24, x28, GE\n"
+ "csel x23, x23, x27, GE\n"
+ "csel x22, x22, x26, GE\n"
+ "csel x21, x21, x25, GE\n"
+ "tbz %x[width], #3, 12f\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q10, [x27, #0x0]\n"
+ "ldr q9, [x28, #0x10]\n"
+ "ldr q8, [x27, #0x10]\n"
+ "add x28, x28, #0x20\n"
+ "add x27, x27, #0x20\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q20, [x23, #0x10]\n"
+ "add x24, x24, #0x20\n"
+ "add x23, x23, #0x20\n"
+ "tbz %x[width], #2, 10f\n"
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x27, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "add x27, x27, #0x10\n"
+ "ldr q19, [x24, #0x0]\n"
+ "ldr q18, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "tbz %x[width], #1, 9f\n"
+ "ldr d5, [x28], #0x8\n"
+ "ldr d4, [x27], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz %x[width], #0, 16f\n"
+ "ld1 { v5.s }[2], [x28], #0x4\n"
+ "ld1 { v4.s }[2], [x27], #0x4\n"
+ "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x23], #0x4\n"
+ "b 16f\n"
+ "9:" // tail loop: unique 1: partial_0_12
+ "tbz %x[width], #0, 16f\n"
+ "ldr s5, [x28], #0x4\n"
+ "ldr s4, [x27], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "ldr s16, [x23], #0x4\n"
+ "b 16f\n"
+ "10:" // tail loop: unique 1: partial_1_8
+ "tbz %x[width], #1, 11f\n"
+ "ldr d7, [x28], #0x8\n"
+ "ldr d6, [x27], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz %x[width], #0, 16f\n"
+ "ld1 { v7.s }[2], [x28], #0x4\n"
+ "ld1 { v6.s }[2], [x27], #0x4\n"
+ "ld1 { v19.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "b 16f\n"
+ "11:" // tail loop: unique 1: partial_0_8
+ "tbz %x[width], #0, 16f\n"
+ "ldr s7, [x28], #0x4\n"
+ "ldr s6, [x27], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "ldr s18, [x23], #0x4\n"
+ "b 16f\n"
+ "12:" // tail loop: unique 1: partial_2_0
+ "tbz %x[width], #2, 14f\n"
+ "ldr q11, [x28, #0x0]\n"
+ "ldr q10, [x27, #0x0]\n"
+ "add x28, x28, #0x10\n"
+ "add x27, x27, #0x10\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q22, [x23, #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "tbz %x[width], #1, 13f\n"
+ "ldr d9, [x28], #0x8\n"
+ "ldr d8, [x27], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d20, [x23], #0x8\n"
+ "tbz %x[width], #0, 16f\n"
+ "ld1 { v9.s }[2], [x28], #0x4\n"
+ "ld1 { v8.s }[2], [x27], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v20.s }[2], [x23], #0x4\n"
+ "b 16f\n"
+ "13:" // tail loop: unique 1: partial_0_4
+ "tbz %x[width], #0, 16f\n"
+ "ldr s9, [x28], #0x4\n"
+ "ldr s8, [x27], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s20, [x23], #0x4\n"
+ "b 16f\n"
+ "14:" // tail loop: unique 1: partial_1_0
+ "tbz %x[width], #1, 15f\n"
+ "ldr d11, [x28], #0x8\n"
+ "ldr d10, [x27], #0x8\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "tbz %x[width], #0, 16f\n"
+ "ld1 { v11.s }[2], [x28], #0x4\n"
+ "ld1 { v10.s }[2], [x27], #0x4\n"
+ "ld1 { v3.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "b 16f\n"
+ "15:" // tail loop: unique 1: partial_0_0
+ "ldr s11, [x28], #0x4\n"
+ "ldr s10, [x27], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "16:" // tail loop: unique 1: Done
+ "fadd v2.4s, v11.4s, v10.4s\n"
+ "fadd v1.4s, v9.4s, v8.4s\n"
+ "fadd v0.4s, v7.4s, v6.4s\n"
+ "fadd v23.4s, v5.4s, v4.4s\n"
+ "fadd v22.4s, v3.4s, v22.4s\n"
+ "fadd v21.4s, v21.4s, v20.4s\n"
+ "fadd v20.4s, v19.4s, v18.4s\n"
+ "fadd v19.4s, v17.4s, v16.4s\n"
+ "cbz %x[out_direct], 25f\n"
+ "tbz %x[width], #3, 20f\n"
+ "str q2, [x25, #0x0]\n"
+ "str q1, [x25, #0x10]\n"
+ "add x25, x25, #0x20\n"
+ "str q22, [x21, #0x0]\n"
+ "str q21, [x21, #0x10]\n"
+ "add x21, x21, #0x20\n"
+ "tbz %x[width], #2, 18f\n"
+ "str q0, [x25, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "str q20, [x21, #0x0]\n"
+ "add x21, x21, #0x10\n"
+ "tbz %x[width], #1, 17f\n"
+ "str d23, [x25], #0x8\n"
+ "str d19, [x21], #0x8\n"
+ "tbz %x[width], #0, 24f\n"
+ "st1 { v23.s }[2], [x25], #0x4\n"
+ "st1 { v19.s }[2], [x21], #0x4\n"
+ "b 24f\n"
+ "17:" // tail loop: Main loop: unique 2: partial_0_12
+ "tbz %x[width], #0, 24f\n"
+ "str s23, [x25], #0x4\n"
+ "str s19, [x21], #0x4\n"
+ "b 24f\n"
+ "18:" // tail loop: Main loop: unique 2: partial_1_8
+ "tbz %x[width], #1, 19f\n"
+ "str d0, [x25], #0x8\n"
+ "str d20, [x21], #0x8\n"
+ "tbz %x[width], #0, 24f\n"
+ "st1 { v0.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x21], #0x4\n"
+ "b 24f\n"
+ "19:" // tail loop: Main loop: unique 2: partial_0_8
+ "tbz %x[width], #0, 24f\n"
+ "str s0, [x25], #0x4\n"
+ "str s20, [x21], #0x4\n"
+ "b 24f\n"
+ "20:" // tail loop: Main loop: unique 2: partial_2_0
+ "tbz %x[width], #2, 22f\n"
+ "str q2, [x25, #0x0]\n"
+ "add x25, x25, #0x10\n"
+ "str q22, [x21, #0x0]\n"
+ "add x21, x21, #0x10\n"
+ "tbz %x[width], #1, 21f\n"
+ "str d1, [x25], #0x8\n"
+ "str d21, [x21], #0x8\n"
+ "tbz %x[width], #0, 24f\n"
+ "st1 { v1.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x21], #0x4\n"
+ "b 24f\n"
+ "21:" // tail loop: Main loop: unique 2: partial_0_4
+ "tbz %x[width], #0, 24f\n"
+ "str s1, [x25], #0x4\n"
+ "str s21, [x21], #0x4\n"
+ "b 24f\n"
+ "22:" // tail loop: Main loop: unique 2: partial_1_0
+ "tbz %x[width], #1, 23f\n"
+ "str d2, [x25], #0x8\n"
+ "str d22, [x21], #0x8\n"
+ "tbz %x[width], #0, 24f\n"
+ "st1 { v2.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x21], #0x4\n"
+ "b 24f\n"
+ "23:" // tail loop: Main loop: unique 2: partial_0_0
+ "str s2, [x25], #0x4\n"
+ "str s22, [x21], #0x4\n"
+ "24:" // tail loop: Main loop: unique 2: Done
+ "25:" // tail loop: Main loop: No direct output
+ "mov v16.16b, v2.16b\n"
+ "mov v2.16b, v28.16b\n"
+ "fmla v2.4s, v16.4s, v24.4s\n"
+ "mov v16.16b, v1.16b\n"
+ "mov v1.16b, v29.16b\n"
+ "fmla v1.4s, v16.4s, v25.4s\n"
+ "mov v16.16b, v0.16b\n"
+ "mov v0.16b, v30.16b\n"
+ "fmla v0.4s, v16.4s, v26.4s\n"
+ "mov v16.16b, v23.16b\n"
+ "mov v23.16b, v31.16b\n"
+ "fmla v23.4s, v16.4s, v27.4s\n"
+ "mov v16.16b, v22.16b\n"
+ "mov v22.16b, v28.16b\n"
+ "fmla v22.4s, v16.4s, v24.4s\n"
+ "mov v16.16b, v21.16b\n"
+ "mov v21.16b, v29.16b\n"
+ "fmla v21.4s, v16.4s, v25.4s\n"
+ "mov v16.16b, v20.16b\n"
+ "mov v20.16b, v30.16b\n"
+ "fmla v20.4s, v16.4s, v26.4s\n"
+ "mov v16.16b, v19.16b\n"
+ "mov v19.16b, v31.16b\n"
+ "fmla v19.4s, v16.4s, v27.4s\n"
+ "fmin v2.4s, v2.4s, v12.4s\n"
+ "fmin v1.4s, v1.4s, v12.4s\n"
+ "fmin v0.4s, v0.4s, v12.4s\n"
+ "fmin v23.4s, v23.4s, v12.4s\n"
+ "fmin v22.4s, v22.4s, v12.4s\n"
+ "fmin v21.4s, v21.4s, v12.4s\n"
+ "fmin v20.4s, v20.4s, v12.4s\n"
+ "fmin v19.4s, v19.4s, v12.4s\n"
+ "fmax v2.4s, v2.4s, v13.4s\n"
+ "fmax v1.4s, v1.4s, v13.4s\n"
+ "fmax v0.4s, v0.4s, v13.4s\n"
+ "fmax v23.4s, v23.4s, v13.4s\n"
+ "fmax v22.4s, v22.4s, v13.4s\n"
+ "fmax v21.4s, v21.4s, v13.4s\n"
+ "fmax v20.4s, v20.4s, v13.4s\n"
+ "fmax v19.4s, v19.4s, v13.4s\n"
+ "tbz %x[width], #3, 29f\n"
+ "str q2, [x26, #0x0]\n"
+ "str q1, [x26, #0x10]\n"
+ "add x26, x26, #0x20\n"
+ "str q22, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "add x22, x22, #0x20\n"
+ "tbz %x[width], #2, 27f\n"
+ "str q0, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q20, [x22, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "tbz %x[width], #1, 26f\n"
+ "str d23, [x26], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz %x[width], #0, 33f\n"
+ "st1 { v23.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x22], #0x4\n"
+ "b 33f\n"
+ "26:" // tail loop: unique 3: partial_0_12
+ "tbz %x[width], #0, 33f\n"
+ "str s23, [x26], #0x4\n"
+ "str s19, [x22], #0x4\n"
+ "b 33f\n"
+ "27:" // tail loop: unique 3: partial_1_8
+ "tbz %x[width], #1, 28f\n"
+ "str d0, [x26], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz %x[width], #0, 33f\n"
+ "st1 { v0.s }[2], [x26], #0x4\n"
+ "st1 { v20.s }[2], [x22], #0x4\n"
+ "b 33f\n"
+ "28:" // tail loop: unique 3: partial_0_8
+ "tbz %x[width], #0, 33f\n"
+ "str s0, [x26], #0x4\n"
+ "str s20, [x22], #0x4\n"
+ "b 33f\n"
+ "29:" // tail loop: unique 3: partial_2_0
+ "tbz %x[width], #2, 31f\n"
+ "str q2, [x26, #0x0]\n"
+ "add x26, x26, #0x10\n"
+ "str q22, [x22, #0x0]\n"
+ "add x22, x22, #0x10\n"
+ "tbz %x[width], #1, 30f\n"
+ "str d1, [x26], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz %x[width], #0, 33f\n"
+ "st1 { v1.s }[2], [x26], #0x4\n"
+ "st1 { v21.s }[2], [x22], #0x4\n"
+ "b 33f\n"
+ "30:" // tail loop: unique 3: partial_0_4
+ "tbz %x[width], #0, 33f\n"
+ "str s1, [x26], #0x4\n"
+ "str s21, [x22], #0x4\n"
+ "b 33f\n"
+ "31:" // tail loop: unique 3: partial_1_0
+ "tbz %x[width], #1, 32f\n"
+ "str d2, [x26], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz %x[width], #0, 33f\n"
+ "st1 { v2.s }[2], [x26], #0x4\n"
+ "st1 { v22.s }[2], [x22], #0x4\n"
+ "b 33f\n"
+ "32:" // tail loop: unique 3: partial_0_0
+ "str s2, [x26], #0x4\n"
+ "str s22, [x22], #0x4\n"
+ "33:" // tail loop: unique 3: Done
+ "subs x20, x20, #0x2\n"
+ "bgt 8b\n"
+ "34:" // odd columns skip
+ : [bn_add] "+&r"(bn_add), [bn_mul] "+&r"(bn_mul), [in0] "+&r"(in0), [in1] "+&r"(in1), [out] "+&r"(out), [out_direct] "+&r"(out_direct), [width] "+&r"(width)
+ : [args_ptr] "r"(&ka), [height] "r"(height), [in0_stride] "r"(in0_stride), [in1_stride] "r"(in1_stride), [offsetof_maxval] "I"(offsetof(KernelArgs, maxval)), [offsetof_minval] "I"(offsetof(KernelArgs, minval)), [out_direct_stride] "r"(out_direct_stride), [out_stride] "r"(out_stride)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
+}
+} // namespace
+
+namespace arm_compute
+{
+namespace cpu
+{
+void add_mul_add_fp32_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add,
+ ITensor *add_output, ITensor *final_output, ConvertPolicy policy, const ActivationLayerInfo &act_info, const Window &window)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ const size_t out_stride = final_output->info()->strides_in_bytes()[1];
+ const size_t out_direct_stride = (add_output != nullptr) ? add_output->info()->strides_in_bytes()[1] : 0;
+ const size_t in0_stride = input1->info()->strides_in_bytes()[1];
+ const size_t in1_stride = input2->info()->strides_in_bytes()[1];
+
+ float minval = std::numeric_limits<float>::lowest();
+ float maxval = std::numeric_limits<float>::max();
+
+ if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+ {
+ minval = 0.f;
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+ {
+ minval = 0.f;
+ maxval = act_info.a();
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ minval = act_info.b();
+ maxval = act_info.a();
+ }
+
+ // Clear X & Y dimensions on execution window as we handle them manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator in1_it(input1, window);
+ Iterator in2_it(input2, window);
+ Iterator out_it(final_output, window);
+
+ const size_t width = window.num_iterations(0);
+ const size_t height = window.num_iterations(1);
+
+ if(add_output != nullptr)
+ {
+ Iterator add_out_it(add_output, window);
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_fp32_2x16(
+ reinterpret_cast<float *>(out_it.ptr()), out_stride,
+ reinterpret_cast<float *>(add_out_it.ptr()), out_direct_stride,
+ reinterpret_cast<float *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<float *>(in2_it.ptr()), in1_stride,
+ reinterpret_cast<float *>(bn_mul->buffer()),
+ reinterpret_cast<float *>(bn_add->buffer()),
+ minval,
+ maxval,
+ width, height);
+ },
+ in1_it, in2_it, add_out_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_fp32_2x16(
+ reinterpret_cast<float *>(out_it.ptr()), out_stride,
+ nullptr, out_direct_stride,
+ reinterpret_cast<float *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<float *>(in2_it.ptr()), in1_stride,
+ reinterpret_cast<float *>(bn_mul->buffer()),
+ reinterpret_cast<float *>(bn_add->buffer()),
+ minval,
+ maxval,
+ width, height);
+ },
+ in1_it, in2_it, out_it);
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // __aarch64__
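
The FP32 kernel above walks the tensors in 16-element column strips, two rows per iteration (duplicating the last row via csel when the height is odd), and falls back to the tbz-driven tail path when the remaining width is below 16. Per element it forms the intermediate sum, optionally stores it to out_direct, then applies the per-channel bn_mul/bn_add and clamps to [minval, maxval]. A plain scalar reference of that computation is sketched below for readability; it is not part of the patch, the function name is invented, and unlike the kernel (which receives byte strides from the wrapper) it takes strides in elements.

    #include <algorithm>
    #include <cstddef>

    // Scalar reference (illustration only) of the fused Add + Mul + Add with an
    // optional intermediate output and a final clamp, as computed by
    // a64_add_bn_clamp_direct_fp32_2x16. Strides here are in elements, not bytes.
    void add_bn_clamp_ref_fp32(float *out, size_t out_stride,
                               float *out_direct, size_t out_direct_stride,
                               const float *in0, size_t in0_stride,
                               const float *in1, size_t in1_stride,
                               const float *bn_mul, const float *bn_add,
                               float minval, float maxval,
                               size_t width, size_t height)
    {
        for(size_t y = 0; y < height; ++y)
        {
            for(size_t x = 0; x < width; ++x)
            {
                const float sum = in0[y * in0_stride + x] + in1[y * in1_stride + x];
                if(out_direct != nullptr)
                {
                    out_direct[y * out_direct_stride + x] = sum; // intermediate Add output
                }
                // Per-channel (x) scale and offset, then clamp to the activation bounds
                const float scaled = sum * bn_mul[x] + bn_add[x];
                out[y * out_stride + x] = std::min(std::max(scaled, minval), maxval);
            }
        }
    }

The FP16 kernel earlier in the patch has the same structure; it only changes the element type and processes 32 columns per strip.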
diff --git a/src/cpu/kernels/addmuladd/generic/neon/qasymm8.cpp b/src/cpu/kernels/addmuladd/generic/neon/qasymm8.cpp
new file mode 100644
index 0000000000..2c0ad70f8f
--- /dev/null
+++ b/src/cpu/kernels/addmuladd/generic/neon/qasymm8.cpp
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/QuantizationInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#ifdef __aarch64__
+namespace
+{
+void a64_add_bn_clamp_direct_u8_fp32_2x16(
+ uint8_t *out, size_t out_stride,
+ uint8_t *out_direct, size_t out_direct_stride,
+ const uint8_t *in0, size_t in0_stride,
+ const uint8_t *in1, size_t in1_stride,
+ const float *bn_mul,
+ const float *bn_add,
+ const uint8_t minval,
+ const uint8_t maxval,
+ int32_t out_zeropt, float out_scale,
+ int32_t out_direct_zeropt, float out_direct_scale,
+ int32_t in0_zeropt, float in0_scale,
+ int32_t in1_zeropt, float in1_scale,
+ size_t width, size_t height)
+{
+ float scales[4] = { in0_scale, in1_scale, 1.0f / out_scale, 1.0f / out_direct_scale };
+ struct KernelArgs
+ {
+ const float *scales;
+ int32_t in0_zeropt;
+ int32_t in1_zeropt;
+ int32_t out_zeropt;
+ int32_t out_direct_zeropt;
+ int32_t minval;
+ int32_t maxval;
+ } ka;
+ ka.scales = scales;
+ ka.in0_zeropt = in0_zeropt;
+ ka.in1_zeropt = in1_zeropt;
+ ka.out_zeropt = out_zeropt;
+ ka.out_direct_zeropt = out_direct_zeropt;
+ ka.minval = minval;
+ ka.maxval = maxval;
+
+ __asm__ __volatile__(
+ "ldr x20, [%x[args_ptr], %[offsetof_scales]]\n"
+ "ld1 { v0.4s }, [x20]\n"
+ "cmp %x[width], #0x10\n"
+ "blt 5f\n"
+ "1:" // Column loop
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x23, %x[height]\n"
+ "mov x12, %x[in0]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x11, %x[in1]\n"
+ "mov x10, %x[out]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x9, %x[out_direct]\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "2:" // Row loop
+ "mov x28, x12\n"
+ "ldr d4, [x28, #0x0]\n"
+ "ldr d3, [x28, #0x8]\n"
+ "add x21, x28, %x[in0_stride]\n"
+ "mov x27, x11\n"
+ "ldr d13, [x27, #0x0]\n"
+ "ldr d12, [x27, #0x8]\n"
+ "cmp x23, #0x2\n"
+ "add x12, x21, %x[in0_stride]\n"
+ "csel x21, x21, x28, GE\n"
+ "ldr d2, [x21, #0x0]\n"
+ "ldr d11, [x21, #0x8]\n"
+ "add x20, x27, %x[in1_stride]\n"
+ "add x11, x20, %x[in1_stride]\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_in0_zeropt]]\n"
+ "ushll v4.8h, v4.8b, #0x0\n"
+ "csel x20, x20, x27, GE\n"
+ "ldr d10, [x20, #0x0]\n"
+ "ldr d9, [x20, #0x8]\n"
+ "ushll v3.8h, v3.8b, #0x0\n"
+ "ushll v2.8h, v2.8b, #0x0\n"
+ "ushll v11.8h, v11.8b, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_in1_zeropt]]\n"
+ "mov x26, x10\n"
+ "dup v16.8h, w21\n"
+ "ushll v13.8h, v13.8b, #0x0\n"
+ "mov x25, x9\n"
+ "add x24, x26, %x[out_stride]\n"
+ "ushll v12.8h, v12.8b, #0x0\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
+ "add x22, x25, %x[out_direct_stride]\n"
+ "add x10, x24, %x[out_stride]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "ssubl v1.4s, v4.4h, v16.4h\n"
+ "add x9, x22, %x[out_direct_stride]\n"
+ "csel x24, x24, x26, GE\n"
+ "ssubl2 v4.4s, v4.8h, v16.8h\n"
+ "ssubl v23.4s, v3.4h, v16.4h\n"
+ "csel x22, x22, x25, GE\n"
+ "ssubl2 v3.4s, v3.8h, v16.8h\n"
+ "ssubl v22.4s, v2.4h, v16.4h\n"
+ "ssubl2 v2.4s, v2.8h, v16.8h\n"
+ "ssubl v21.4s, v11.4h, v16.4h\n"
+ "ssubl2 v11.4s, v11.8h, v16.8h\n"
+ "dup v20.8h, w20\n"
+ "ssubl v19.4s, v13.4h, v20.4h\n"
+ "ssubl2 v13.4s, v13.8h, v20.8h\n"
+ "ssubl v18.4s, v12.4h, v20.4h\n"
+ "ssubl2 v12.4s, v12.8h, v20.8h\n"
+ "ssubl v17.4s, v10.4h, v20.4h\n"
+ "ssubl2 v10.4s, v10.8h, v20.8h\n"
+ "ssubl v16.4s, v9.4h, v20.4h\n"
+ "ssubl2 v9.4s, v9.8h, v20.8h\n"
+ "scvtf v8.4s, v1.4s\n"
+ "scvtf v7.4s, v4.4s\n"
+ "scvtf v6.4s, v23.4s\n"
+ "scvtf v5.4s, v3.4s\n"
+ "scvtf v4.4s, v22.4s\n"
+ "scvtf v3.4s, v2.4s\n"
+ "scvtf v2.4s, v21.4s\n"
+ "scvtf v1.4s, v11.4s\n"
+ "scvtf v19.4s, v19.4s\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "fmla v8.4s, v19.4s, v0.s[1]\n"
+ "scvtf v13.4s, v13.4s\n"
+ "fmul v7.4s, v7.4s, v0.s[0]\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "scvtf v18.4s, v18.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "fmla v6.4s, v18.4s, v0.s[1]\n"
+ "scvtf v12.4s, v12.4s\n"
+ "fmul v5.4s, v5.4s, v0.s[0]\n"
+ "fmla v5.4s, v12.4s, v0.s[1]\n"
+ "scvtf v17.4s, v17.4s\n"
+ "fmul v4.4s, v4.4s, v0.s[0]\n"
+ "fmla v4.4s, v17.4s, v0.s[1]\n"
+ "scvtf v10.4s, v10.4s\n"
+ "fmul v3.4s, v3.4s, v0.s[0]\n"
+ "fmla v3.4s, v10.4s, v0.s[1]\n"
+ "scvtf v16.4s, v16.4s\n"
+ "fmul v2.4s, v2.4s, v0.s[0]\n"
+ "fmla v2.4s, v16.4s, v0.s[1]\n"
+ "scvtf v9.4s, v9.4s\n"
+ "fmul v1.4s, v1.4s, v0.s[0]\n"
+ "fmla v1.4s, v9.4s, v0.s[1]\n"
+ "cbz %x[out_direct], 3f\n"
+ "fmul v23.4s, v8.4s, v0.s[3]\n"
+ "fmul v22.4s, v7.4s, v0.s[3]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_out_direct_zeropt]]\n"
+ "fmul v21.4s, v6.4s, v0.s[3]\n"
+ "fmul v20.4s, v5.4s, v0.s[3]\n"
+ "fmul v19.4s, v4.4s, v0.s[3]\n"
+ "fmul v18.4s, v3.4s, v0.s[3]\n"
+ "fmul v16.4s, v2.4s, v0.s[3]\n"
+ "fmul v17.4s, v1.4s, v0.s[3]\n"
+ "fcvtas v23.4s, v23.4s\n"
+ "fcvtas v22.4s, v22.4s\n"
+ "fcvtas v21.4s, v21.4s\n"
+ "fcvtas v20.4s, v20.4s\n"
+ "fcvtas v19.4s, v19.4s\n"
+ "fcvtas v18.4s, v18.4s\n"
+ "fcvtas v16.4s, v16.4s\n"
+ "fcvtas v17.4s, v17.4s\n"
+ "uzp1 v22.8h, v23.8h, v22.8h\n"
+ "uzp1 v20.8h, v21.8h, v20.8h\n"
+ "uzp1 v18.8h, v19.8h, v18.8h\n"
+ "uzp1 v17.8h, v16.8h, v17.8h\n"
+ "dup v16.8h, w20\n"
+ "add v22.8h, v22.8h, v16.8h\n"
+ "add v20.8h, v20.8h, v16.8h\n"
+ "add v18.8h, v18.8h, v16.8h\n"
+ "add v17.8h, v17.8h, v16.8h\n"
+ "movi v16.8h, #0xff\n"
+ "smin v22.8h, v22.8h, v16.8h\n"
+ "smin v20.8h, v20.8h, v16.8h\n"
+ "smin v18.8h, v18.8h, v16.8h\n"
+ "smin v17.8h, v17.8h, v16.8h\n"
+ "movi v16.8h, #0x0\n"
+ "smax v22.8h, v22.8h, v16.8h\n"
+ "smax v20.8h, v20.8h, v16.8h\n"
+ "smax v18.8h, v18.8h, v16.8h\n"
+ "smax v17.8h, v17.8h, v16.8h\n"
+ "xtn v22.8b, v22.8h\n"
+ "str d22, [x25, #0x0]\n"
+ "xtn v20.8b, v20.8h\n"
+ "xtn v18.8b, v18.8h\n"
+ "str d20, [x25, #0x8]\n"
+ "xtn v17.8b, v17.8h\n"
+ "str d18, [x22, #0x0]\n"
+ "str d17, [x22, #0x8]\n"
+ "3:" // Main loop: No direct output
+ "mov v19.16b, v28.16b\n"
+ "mov v13.16b, v29.16b\n"
+ "fmla v19.4s, v8.4s, v24.4s\n"
+ "ldr w22, [%x[args_ptr], %[offsetof_out_zeropt]]\n"
+ "mov v18.16b, v30.16b\n"
+ "mov v12.16b, v31.16b\n"
+ "fmla v13.4s, v7.4s, v25.4s\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_maxval]]\n"
+ "mov v17.16b, v28.16b\n"
+ "mov v10.16b, v29.16b\n"
+ "fmla v18.4s, v6.4s, v26.4s\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_minval]]\n"
+ "mov v16.16b, v30.16b\n"
+ "mov v9.16b, v31.16b\n"
+ "fmla v12.4s, v5.4s, v27.4s\n"
+ "subs x23, x23, #0x2\n"
+ "fmla v17.4s, v4.4s, v24.4s\n"
+ "fmla v10.4s, v3.4s, v25.4s\n"
+ "fmul v8.4s, v19.4s, v0.s[2]\n"
+ "fmla v16.4s, v2.4s, v26.4s\n"
+ "fmla v9.4s, v1.4s, v27.4s\n"
+ "fmul v7.4s, v13.4s, v0.s[2]\n"
+ "fmul v6.4s, v18.4s, v0.s[2]\n"
+ "fmul v5.4s, v12.4s, v0.s[2]\n"
+ "fmul v4.4s, v17.4s, v0.s[2]\n"
+ "fmul v3.4s, v10.4s, v0.s[2]\n"
+ "fmul v2.4s, v16.4s, v0.s[2]\n"
+ "fmul v1.4s, v9.4s, v0.s[2]\n"
+ "fcvtas v8.4s, v8.4s\n"
+ "fcvtas v7.4s, v7.4s\n"
+ "fcvtas v6.4s, v6.4s\n"
+ "fcvtas v5.4s, v5.4s\n"
+ "fcvtas v4.4s, v4.4s\n"
+ "fcvtas v3.4s, v3.4s\n"
+ "fcvtas v2.4s, v2.4s\n"
+ "fcvtas v1.4s, v1.4s\n"
+ "uzp1 v7.8h, v8.8h, v7.8h\n"
+ "uzp1 v5.8h, v6.8h, v5.8h\n"
+ "uzp1 v3.8h, v4.8h, v3.8h\n"
+ "uzp1 v1.8h, v2.8h, v1.8h\n"
+ "dup v16.8h, w22\n"
+ "add v7.8h, v7.8h, v16.8h\n"
+ "add v5.8h, v5.8h, v16.8h\n"
+ "add v3.8h, v3.8h, v16.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w21\n"
+ "smin v7.8h, v7.8h, v16.8h\n"
+ "smin v5.8h, v5.8h, v16.8h\n"
+ "smin v3.8h, v3.8h, v16.8h\n"
+ "smin v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w20\n"
+ "smax v7.8h, v7.8h, v16.8h\n"
+ "smax v5.8h, v5.8h, v16.8h\n"
+ "smax v3.8h, v3.8h, v16.8h\n"
+ "smax v1.8h, v1.8h, v16.8h\n"
+ "xtn v7.8b, v7.8h\n"
+ "str d7, [x26, #0x0]\n"
+ "xtn v5.8b, v5.8h\n"
+ "xtn v3.8b, v3.8h\n"
+ "str d5, [x26, #0x8]\n"
+ "xtn v1.8b, v1.8h\n"
+ "str d3, [x24, #0x0]\n"
+ "str d1, [x24, #0x8]\n"
+ "bgt 2b\n"
+ "add %x[in0], %x[in0], #0x10\n"
+ "add %x[in1], %x[in1], #0x10\n"
+ "add %x[out], %x[out], #0x10\n"
+ "cbz %x[out_direct], 4f\n"
+ "add %x[out_direct], %x[out_direct], #0x10\n"
+ "4:" // No direct pointer update
+ "sub %x[width], %x[width], #0x10\n"
+ "cmp %x[width], #0x10\n"
+ "bge 1b\n"
+ "cbz %x[width], 32f\n"
+ "5:" // main loop skip
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x23, %x[height]\n"
+ "mov x12, %x[in0]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x11, %x[in1]\n"
+ "mov x10, %x[out]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x9, %x[out_direct]\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "6:" // tail loop: Row loop
+ "mov x28, x12\n"
+ "mov x27, x11\n"
+ "mov x26, x10\n"
+ "mov x25, x9\n"
+ "add x21, x28, %x[in0_stride]\n"
+ "add x20, x27, %x[in1_stride]\n"
+ "add x24, x26, %x[out_stride]\n"
+ "add x22, x25, %x[out_direct_stride]\n"
+ "cmp x23, #0x2\n"
+ "add x12, x21, %x[in0_stride]\n"
+ "add x11, x20, %x[in1_stride]\n"
+ "add x10, x24, %x[out_stride]\n"
+ "add x9, x22, %x[out_direct_stride]\n"
+ "csel x21, x21, x28, GE\n"
+ "csel x20, x20, x27, GE\n"
+ "csel x24, x24, x26, GE\n"
+ "csel x22, x22, x25, GE\n"
+ "tbz %x[width], #3, 10f\n"
+ "ldr d4, [x28, #0x0]\n"
+ "ldr d13, [x27, #0x0]\n"
+ "add x28, x28, #0x8\n"
+ "add x27, x27, #0x8\n"
+ "ldr d2, [x21, #0x0]\n"
+ "ldr d10, [x20, #0x0]\n"
+ "add x21, x21, #0x8\n"
+ "add x20, x20, #0x8\n"
+ "tbz %x[width], #2, 8f\n"
+ "ldr s3, [x28], #0x4\n"
+ "ldr s12, [x27], #0x4\n"
+ "ldr s11, [x21], #0x4\n"
+ "ldr s9, [x20], #0x4\n"
+ "tbz %x[width], #1, 7f\n"
+ "ld1 { v3.h }[2], [x28], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
+ "ld1 { v11.h }[2], [x21], #0x2\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v3.b }[6], [x28], #0x1\n"
+ "ld1 { v12.b }[6], [x27], #0x1\n"
+ "ld1 { v11.b }[6], [x21], #0x1\n"
+ "ld1 { v9.b }[6], [x20], #0x1\n"
+ "b 14f\n"
+ "7:" // tail loop: unique 1: partial_0_12
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v3.b }[4], [x28], #0x1\n"
+ "ld1 { v12.b }[4], [x27], #0x1\n"
+ "ld1 { v11.b }[4], [x21], #0x1\n"
+ "ld1 { v9.b }[4], [x20], #0x1\n"
+ "b 14f\n"
+ "8:" // tail loop: unique 1: partial_1_8
+ "tbz %x[width], #1, 9f\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h12, [x27], #0x2\n"
+ "ldr h11, [x21], #0x2\n"
+ "ldr h9, [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v12.b }[2], [x27], #0x1\n"
+ "ld1 { v11.b }[2], [x21], #0x1\n"
+ "ld1 { v9.b }[2], [x20], #0x1\n"
+ "b 14f\n"
+ "9:" // tail loop: unique 1: partial_0_8
+ "tbz %x[width], #0, 14f\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b12, [x27], #0x1\n"
+ "ldr b11, [x21], #0x1\n"
+ "ldr b9, [x20], #0x1\n"
+ "b 14f\n"
+ "10:" // tail loop: unique 1: partial_2_0
+ "tbz %x[width], #2, 12f\n"
+ "ldr s4, [x28], #0x4\n"
+ "ldr s13, [x27], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s10, [x20], #0x4\n"
+ "tbz %x[width], #1, 11f\n"
+ "ld1 { v4.h }[2], [x28], #0x2\n"
+ "ld1 { v13.h }[2], [x27], #0x2\n"
+ "ld1 { v2.h }[2], [x21], #0x2\n"
+ "ld1 { v10.h }[2], [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v4.b }[6], [x28], #0x1\n"
+ "ld1 { v13.b }[6], [x27], #0x1\n"
+ "ld1 { v2.b }[6], [x21], #0x1\n"
+ "ld1 { v10.b }[6], [x20], #0x1\n"
+ "b 14f\n"
+ "11:" // tail loop: unique 1: partial_0_4
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v4.b }[4], [x28], #0x1\n"
+ "ld1 { v13.b }[4], [x27], #0x1\n"
+ "ld1 { v2.b }[4], [x21], #0x1\n"
+ "ld1 { v10.b }[4], [x20], #0x1\n"
+ "b 14f\n"
+ "12:" // tail loop: unique 1: partial_1_0
+ "tbz %x[width], #1, 13f\n"
+ "ldr h4, [x28], #0x2\n"
+ "ldr h13, [x27], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "ldr h10, [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v4.b }[2], [x28], #0x1\n"
+ "ld1 { v13.b }[2], [x27], #0x1\n"
+ "ld1 { v2.b }[2], [x21], #0x1\n"
+ "ld1 { v10.b }[2], [x20], #0x1\n"
+ "b 14f\n"
+ "13:" // tail loop: unique 1: partial_0_0
+ "ldr b4, [x28], #0x1\n"
+ "ldr b13, [x27], #0x1\n"
+ "ldr b2, [x21], #0x1\n"
+ "ldr b10, [x20], #0x1\n"
+ "14:" // tail loop: unique 1: Done
+ "ldr w21, [%x[args_ptr], %[offsetof_in0_zeropt]]\n"
+ "ushll v4.8h, v4.8b, #0x0\n"
+ "ushll v3.8h, v3.8b, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_in1_zeropt]]\n"
+ "ushll v2.8h, v2.8b, #0x0\n"
+ "ushll v11.8h, v11.8b, #0x0\n"
+ "dup v16.8h, w21\n"
+ "ushll v13.8h, v13.8b, #0x0\n"
+ "ushll v12.8h, v12.8b, #0x0\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "ssubl v1.4s, v4.4h, v16.4h\n"
+ "ssubl2 v4.4s, v4.8h, v16.8h\n"
+ "ssubl v23.4s, v3.4h, v16.4h\n"
+ "ssubl2 v3.4s, v3.8h, v16.8h\n"
+ "ssubl v22.4s, v2.4h, v16.4h\n"
+ "ssubl2 v2.4s, v2.8h, v16.8h\n"
+ "ssubl v21.4s, v11.4h, v16.4h\n"
+ "ssubl2 v11.4s, v11.8h, v16.8h\n"
+ "dup v20.8h, w20\n"
+ "ssubl v19.4s, v13.4h, v20.4h\n"
+ "ssubl2 v13.4s, v13.8h, v20.8h\n"
+ "ssubl v18.4s, v12.4h, v20.4h\n"
+ "ssubl2 v12.4s, v12.8h, v20.8h\n"
+ "ssubl v17.4s, v10.4h, v20.4h\n"
+ "ssubl2 v10.4s, v10.8h, v20.8h\n"
+ "ssubl v16.4s, v9.4h, v20.4h\n"
+ "ssubl2 v9.4s, v9.8h, v20.8h\n"
+ "scvtf v8.4s, v1.4s\n"
+ "scvtf v7.4s, v4.4s\n"
+ "scvtf v6.4s, v23.4s\n"
+ "scvtf v5.4s, v3.4s\n"
+ "scvtf v4.4s, v22.4s\n"
+ "scvtf v3.4s, v2.4s\n"
+ "scvtf v2.4s, v21.4s\n"
+ "scvtf v1.4s, v11.4s\n"
+ "scvtf v19.4s, v19.4s\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "fmla v8.4s, v19.4s, v0.s[1]\n"
+ "scvtf v13.4s, v13.4s\n"
+ "fmul v7.4s, v7.4s, v0.s[0]\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "scvtf v18.4s, v18.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "fmla v6.4s, v18.4s, v0.s[1]\n"
+ "scvtf v12.4s, v12.4s\n"
+ "fmul v5.4s, v5.4s, v0.s[0]\n"
+ "fmla v5.4s, v12.4s, v0.s[1]\n"
+ "scvtf v17.4s, v17.4s\n"
+ "fmul v4.4s, v4.4s, v0.s[0]\n"
+ "fmla v4.4s, v17.4s, v0.s[1]\n"
+ "scvtf v10.4s, v10.4s\n"
+ "fmul v3.4s, v3.4s, v0.s[0]\n"
+ "fmla v3.4s, v10.4s, v0.s[1]\n"
+ "scvtf v16.4s, v16.4s\n"
+ "fmul v2.4s, v2.4s, v0.s[0]\n"
+ "fmla v2.4s, v16.4s, v0.s[1]\n"
+ "scvtf v9.4s, v9.4s\n"
+ "fmul v1.4s, v1.4s, v0.s[0]\n"
+ "fmla v1.4s, v9.4s, v0.s[1]\n"
+ "cbz %x[out_direct], 23f\n"
+ "fmul v23.4s, v8.4s, v0.s[3]\n"
+ "fmul v22.4s, v7.4s, v0.s[3]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_out_direct_zeropt]]\n"
+ "fmul v21.4s, v6.4s, v0.s[3]\n"
+ "fmul v20.4s, v5.4s, v0.s[3]\n"
+ "fmul v19.4s, v4.4s, v0.s[3]\n"
+ "fmul v18.4s, v3.4s, v0.s[3]\n"
+ "fmul v16.4s, v2.4s, v0.s[3]\n"
+ "fmul v17.4s, v1.4s, v0.s[3]\n"
+ "fcvtas v23.4s, v23.4s\n"
+ "fcvtas v22.4s, v22.4s\n"
+ "fcvtas v21.4s, v21.4s\n"
+ "fcvtas v20.4s, v20.4s\n"
+ "fcvtas v19.4s, v19.4s\n"
+ "fcvtas v18.4s, v18.4s\n"
+ "fcvtas v16.4s, v16.4s\n"
+ "fcvtas v17.4s, v17.4s\n"
+ "uzp1 v22.8h, v23.8h, v22.8h\n"
+ "uzp1 v20.8h, v21.8h, v20.8h\n"
+ "uzp1 v18.8h, v19.8h, v18.8h\n"
+ "uzp1 v17.8h, v16.8h, v17.8h\n"
+ "dup v16.8h, w20\n"
+ "add v22.8h, v22.8h, v16.8h\n"
+ "add v20.8h, v20.8h, v16.8h\n"
+ "add v18.8h, v18.8h, v16.8h\n"
+ "add v17.8h, v17.8h, v16.8h\n"
+ "movi v16.8h, #0xff\n"
+ "smin v22.8h, v22.8h, v16.8h\n"
+ "smin v20.8h, v20.8h, v16.8h\n"
+ "smin v18.8h, v18.8h, v16.8h\n"
+ "smin v17.8h, v17.8h, v16.8h\n"
+ "movi v16.8h, #0x0\n"
+ "smax v22.8h, v22.8h, v16.8h\n"
+ "smax v20.8h, v20.8h, v16.8h\n"
+ "smax v18.8h, v18.8h, v16.8h\n"
+ "smax v17.8h, v17.8h, v16.8h\n"
+ "xtn v22.8b, v22.8h\n"
+ "xtn v20.8b, v20.8h\n"
+ "xtn v18.8b, v18.8h\n"
+ "xtn v17.8b, v17.8h\n"
+ "tbz %x[width], #3, 18f\n"
+ "str d22, [x25, #0x0]\n"
+ "add x25, x25, #0x8\n"
+ "str d18, [x22, #0x0]\n"
+ "add x22, x22, #0x8\n"
+ "tbz %x[width], #2, 16f\n"
+ "str s20, [x25], #0x4\n"
+ "str s17, [x22], #0x4\n"
+ "tbz %x[width], #1, 15f\n"
+ "st1 { v20.h }[2], [x25], #0x2\n"
+ "st1 { v17.h }[2], [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v20.b }[6], [x25], #0x1\n"
+ "st1 { v17.b }[6], [x22], #0x1\n"
+ "b 22f\n"
+ "15:" // tail loop: Main loop: unique 2: partial_0_12
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v20.b }[4], [x25], #0x1\n"
+ "st1 { v17.b }[4], [x22], #0x1\n"
+ "b 22f\n"
+ "16:" // tail loop: Main loop: unique 2: partial_1_8
+ "tbz %x[width], #1, 17f\n"
+ "str h20, [x25], #0x2\n"
+ "str h17, [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v20.b }[2], [x25], #0x1\n"
+ "st1 { v17.b }[2], [x22], #0x1\n"
+ "b 22f\n"
+ "17:" // tail loop: Main loop: unique 2: partial_0_8
+ "tbz %x[width], #0, 22f\n"
+ "str b20, [x25], #0x1\n"
+ "str b17, [x22], #0x1\n"
+ "b 22f\n"
+ "18:" // tail loop: Main loop: unique 2: partial_2_0
+ "tbz %x[width], #2, 20f\n"
+ "str s22, [x25], #0x4\n"
+ "str s18, [x22], #0x4\n"
+ "tbz %x[width], #1, 19f\n"
+ "st1 { v22.h }[2], [x25], #0x2\n"
+ "st1 { v18.h }[2], [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v22.b }[6], [x25], #0x1\n"
+ "st1 { v18.b }[6], [x22], #0x1\n"
+ "b 22f\n"
+ "19:" // tail loop: Main loop: unique 2: partial_0_4
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v22.b }[4], [x25], #0x1\n"
+ "st1 { v18.b }[4], [x22], #0x1\n"
+ "b 22f\n"
+ "20:" // tail loop: Main loop: unique 2: partial_1_0
+ "tbz %x[width], #1, 21f\n"
+ "str h22, [x25], #0x2\n"
+ "str h18, [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v22.b }[2], [x25], #0x1\n"
+ "st1 { v18.b }[2], [x22], #0x1\n"
+ "b 22f\n"
+ "21:" // tail loop: Main loop: unique 2: partial_0_0
+ "str b22, [x25], #0x1\n"
+ "str b18, [x22], #0x1\n"
+ "22:" // tail loop: Main loop: unique 2: Done
+ "23:" // tail loop: Main loop: No direct output
+ "mov v19.16b, v28.16b\n"
+ "mov v13.16b, v29.16b\n"
+ "fmla v19.4s, v8.4s, v24.4s\n"
+ "ldr w22, [%x[args_ptr], %[offsetof_out_zeropt]]\n"
+ "mov v18.16b, v30.16b\n"
+ "mov v12.16b, v31.16b\n"
+ "fmla v13.4s, v7.4s, v25.4s\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_maxval]]\n"
+ "mov v17.16b, v28.16b\n"
+ "mov v10.16b, v29.16b\n"
+ "fmla v18.4s, v6.4s, v26.4s\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_minval]]\n"
+ "mov v16.16b, v30.16b\n"
+ "mov v9.16b, v31.16b\n"
+ "fmla v12.4s, v5.4s, v27.4s\n"
+ "fmla v17.4s, v4.4s, v24.4s\n"
+ "fmla v10.4s, v3.4s, v25.4s\n"
+ "fmul v8.4s, v19.4s, v0.s[2]\n"
+ "fmla v16.4s, v2.4s, v26.4s\n"
+ "fmla v9.4s, v1.4s, v27.4s\n"
+ "fmul v7.4s, v13.4s, v0.s[2]\n"
+ "fmul v6.4s, v18.4s, v0.s[2]\n"
+ "fmul v5.4s, v12.4s, v0.s[2]\n"
+ "fmul v4.4s, v17.4s, v0.s[2]\n"
+ "fmul v3.4s, v10.4s, v0.s[2]\n"
+ "fmul v2.4s, v16.4s, v0.s[2]\n"
+ "fmul v1.4s, v9.4s, v0.s[2]\n"
+ "fcvtas v8.4s, v8.4s\n"
+ "fcvtas v7.4s, v7.4s\n"
+ "fcvtas v6.4s, v6.4s\n"
+ "fcvtas v5.4s, v5.4s\n"
+ "fcvtas v4.4s, v4.4s\n"
+ "fcvtas v3.4s, v3.4s\n"
+ "fcvtas v2.4s, v2.4s\n"
+ "fcvtas v1.4s, v1.4s\n"
+ "uzp1 v7.8h, v8.8h, v7.8h\n"
+ "uzp1 v5.8h, v6.8h, v5.8h\n"
+ "uzp1 v3.8h, v4.8h, v3.8h\n"
+ "uzp1 v1.8h, v2.8h, v1.8h\n"
+ "dup v16.8h, w22\n"
+ "add v7.8h, v7.8h, v16.8h\n"
+ "add v5.8h, v5.8h, v16.8h\n"
+ "add v3.8h, v3.8h, v16.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w21\n"
+ "smin v7.8h, v7.8h, v16.8h\n"
+ "smin v5.8h, v5.8h, v16.8h\n"
+ "smin v3.8h, v3.8h, v16.8h\n"
+ "smin v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w20\n"
+ "smax v7.8h, v7.8h, v16.8h\n"
+ "smax v5.8h, v5.8h, v16.8h\n"
+ "smax v3.8h, v3.8h, v16.8h\n"
+ "smax v1.8h, v1.8h, v16.8h\n"
+ "xtn v7.8b, v7.8h\n"
+ "xtn v5.8b, v5.8h\n"
+ "xtn v3.8b, v3.8h\n"
+ "xtn v1.8b, v1.8h\n"
+ "tbz %x[width], #3, 27f\n"
+ "str d7, [x26, #0x0]\n"
+ "add x26, x26, #0x8\n"
+ "str d3, [x24, #0x0]\n"
+ "add x24, x24, #0x8\n"
+ "tbz %x[width], #2, 25f\n"
+ "str s5, [x26], #0x4\n"
+ "str s1, [x24], #0x4\n"
+ "tbz %x[width], #1, 24f\n"
+ "st1 { v5.h }[2], [x26], #0x2\n"
+ "st1 { v1.h }[2], [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v5.b }[6], [x26], #0x1\n"
+ "st1 { v1.b }[6], [x24], #0x1\n"
+ "b 31f\n"
+ "24:" // tail loop: unique 3: partial_0_12
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v5.b }[4], [x26], #0x1\n"
+ "st1 { v1.b }[4], [x24], #0x1\n"
+ "b 31f\n"
+ "25:" // tail loop: unique 3: partial_1_8
+ "tbz %x[width], #1, 26f\n"
+ "str h5, [x26], #0x2\n"
+ "str h1, [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v5.b }[2], [x26], #0x1\n"
+ "st1 { v1.b }[2], [x24], #0x1\n"
+ "b 31f\n"
+ "26:" // tail loop: unique 3: partial_0_8
+ "tbz %x[width], #0, 31f\n"
+ "str b5, [x26], #0x1\n"
+ "str b1, [x24], #0x1\n"
+ "b 31f\n"
+ "27:" // tail loop: unique 3: partial_2_0
+ "tbz %x[width], #2, 29f\n"
+ "str s7, [x26], #0x4\n"
+ "str s3, [x24], #0x4\n"
+ "tbz %x[width], #1, 28f\n"
+ "st1 { v7.h }[2], [x26], #0x2\n"
+ "st1 { v3.h }[2], [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v7.b }[6], [x26], #0x1\n"
+ "st1 { v3.b }[6], [x24], #0x1\n"
+ "b 31f\n"
+ "28:" // tail loop: unique 3: partial_0_4
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v7.b }[4], [x26], #0x1\n"
+ "st1 { v3.b }[4], [x24], #0x1\n"
+ "b 31f\n"
+ "29:" // tail loop: unique 3: partial_1_0
+ "tbz %x[width], #1, 30f\n"
+ "str h7, [x26], #0x2\n"
+ "str h3, [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v7.b }[2], [x26], #0x1\n"
+ "st1 { v3.b }[2], [x24], #0x1\n"
+ "b 31f\n"
+ "30:" // tail loop: unique 3: partial_0_0
+ "str b7, [x26], #0x1\n"
+ "str b3, [x24], #0x1\n"
+ "31:" // tail loop: unique 3: Done
+ "subs x23, x23, #0x2\n"
+ "bgt 6b\n"
+ "32:" // odd columns skip
+ : [bn_add] "+&r"(bn_add), [bn_mul] "+&r"(bn_mul), [in0] "+&r"(in0), [in1] "+&r"(in1), [out] "+&r"(out), [out_direct] "+&r"(out_direct), [width] "+&r"(width)
+ : [args_ptr] "r"(&ka), [height] "r"(height), [in0_stride] "r"(in0_stride), [in1_stride] "r"(in1_stride), [offsetof_in0_zeropt] "I"(offsetof(KernelArgs, in0_zeropt)), [offsetof_in1_zeropt] "I"(offsetof(KernelArgs, in1_zeropt)), [offsetof_maxval] "I"(offsetof(KernelArgs, maxval)), [offsetof_minval] "I"(offsetof(KernelArgs, minval)), [offsetof_out_direct_zeropt] "I"(offsetof(KernelArgs, out_direct_zeropt)), [offsetof_out_zeropt] "I"(offsetof(KernelArgs, out_zeropt)), [offsetof_scales] "I"(offsetof(KernelArgs, scales)), [out_direct_stride] "r"(out_direct_stride), [out_stride] "r"(out_stride)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
+}
+
+} // namespace
+
+namespace arm_compute
+{
+namespace cpu
+{
+void add_mul_add_u8_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add,
+ ITensor *add_output, ITensor *final_output, ConvertPolicy policy, const ActivationLayerInfo &act_info, const Window &window)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ const ITensorInfo *final_output_info = final_output->info();
+ const ITensorInfo *add_output_info = (add_output != nullptr) ? add_output->info() : nullptr;
+ const ITensorInfo *input1_info = input1->info();
+ const ITensorInfo *input2_info = input2->info();
+
+ const size_t out_stride = final_output_info->strides_in_bytes()[1];
+ const size_t out_direct_stride = (add_output != nullptr) ? add_output_info->strides_in_bytes()[1] : 0;
+ const size_t in0_stride = input1_info->strides_in_bytes()[1];
+ const size_t in1_stride = input2_info->strides_in_bytes()[1];
+
+ uint8_t minval = std::numeric_limits<uint8_t>::lowest();
+ uint8_t maxval = std::numeric_limits<uint8_t>::max();
+
+ const UniformQuantizationInfo final_output_qinfo = final_output_info->quantization_info().uniform();
+ if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+ {
+ minval = quantize_qasymm8(0.f, final_output_qinfo);
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+ {
+ minval = quantize_qasymm8(0.f, final_output_qinfo);
+ maxval = quantize_qasymm8(act_info.a(), final_output_qinfo);
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ minval = quantize_qasymm8(act_info.b(), final_output_qinfo);
+ maxval = quantize_qasymm8(act_info.a(), final_output_qinfo);
+ }
+
+ const UniformQuantizationInfo in1_qinfo = input1_info->quantization_info().uniform();
+ const UniformQuantizationInfo in2_qinfo = input2_info->quantization_info().uniform();
+ const UniformQuantizationInfo add_output_qinfo = (add_output != nullptr) ? add_output_info->quantization_info().uniform() : UniformQuantizationInfo();
+
+ const int32_t in1_offset = in1_qinfo.offset;
+ const int32_t in2_offset = in2_qinfo.offset;
+ const int32_t out_offset = final_output_qinfo.offset;
+ const int32_t out_direct_offset = add_output_qinfo.offset;
+
+ const float in1_scale = in1_qinfo.scale;
+ const float in2_scale = in2_qinfo.scale;
+ const float out_scale = final_output_qinfo.scale;
+ const float out_direct_scale = add_output_qinfo.scale;
+
+ const float *bn_mul_buffer = reinterpret_cast<float *>(bn_mul->buffer());
+ const float *bn_add_buffer = reinterpret_cast<float *>(bn_add->buffer());
+
+ // Clear X & Y dimensions on execution window as we handle them manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator in1_it(input1, window);
+ Iterator in2_it(input2, window);
+ Iterator out_it(final_output, window);
+
+ const size_t width = window.num_iterations(0);
+ const size_t height = window.num_iterations(1);
+
+ if(add_output != nullptr)
+ {
+ Iterator add_out_it(add_output, window);
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_u8_fp32_2x16(
+ reinterpret_cast<uint8_t *>(out_it.ptr()), out_stride,
+ reinterpret_cast<uint8_t *>(add_out_it.ptr()), out_direct_stride,
+ reinterpret_cast<uint8_t *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<uint8_t *>(in2_it.ptr()), in1_stride,
+ bn_mul_buffer,
+ bn_add_buffer,
+ minval,
+ maxval,
+ out_offset, out_scale,
+ out_direct_offset, out_direct_scale,
+ in1_offset, in1_scale,
+ in2_offset, in2_scale,
+ width, height);
+ },
+ in1_it, in2_it, add_out_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_u8_fp32_2x16(
+ reinterpret_cast<uint8_t *>(out_it.ptr()), out_stride,
+ nullptr, out_direct_stride,
+ reinterpret_cast<uint8_t *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<uint8_t *>(in2_it.ptr()), in1_stride,
+ bn_mul_buffer,
+ bn_add_buffer,
+ minval,
+ maxval,
+ out_offset, out_scale,
+ out_direct_offset, out_direct_scale,
+ in1_offset, in1_scale,
+ in2_offset, in2_scale,
+ width, height);
+ },
+ in1_it, in2_it, out_it);
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // __aarch64__
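
The QASYMM8 kernel dequantizes each input with its own zero point and scale, performs the fused arithmetic in FP32, and requantizes the two outputs independently: the intermediate Add output with 1/out_direct_scale and out_direct_zeropt, saturated to the full [0, 255] range, and the final output with 1/out_scale and out_zeropt, clamped to the already-quantized activation bounds. Packing {in0_scale, in1_scale, 1/out_scale, 1/out_direct_scale} into one vector lets the kernel apply them with indexed fmul/fmla, and fcvtas rounds to nearest with ties away from zero. The per-element math is sketched below as an illustration only; the struct and helper names are invented for the example and are not part of the patch.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    struct QParams { int32_t zeropt; float scale; };

    // std::lround matches fcvtas' round-to-nearest, ties-away-from-zero behaviour.
    inline uint8_t requantize_u8(float v, const QParams &q, int32_t lo, int32_t hi)
    {
        const int32_t r = static_cast<int32_t>(std::lround(v / q.scale)) + q.zeropt;
        return static_cast<uint8_t>(std::min(std::max(r, lo), hi));
    }

    // Illustration of one element of the QASYMM8 AddMulAdd path.
    inline void add_mul_add_u8_element(uint8_t in0, uint8_t in1,
                                       float bn_mul_c, float bn_add_c, // per-channel values
                                       const QParams &in0_q, const QParams &in1_q,
                                       const QParams &out_q, const QParams &out_direct_q,
                                       uint8_t minval, uint8_t maxval,
                                       uint8_t &out, uint8_t *out_direct)
    {
        // Dequantize both inputs and accumulate in FP32
        const float sum = (in0 - in0_q.zeropt) * in0_q.scale + (in1 - in1_q.zeropt) * in1_q.scale;
        if(out_direct != nullptr)
        {
            *out_direct = requantize_u8(sum, out_direct_q, 0, 255); // intermediate Add output
        }
        // Per-channel scale/offset, then requantize and clamp to the activation bounds
        const float scaled = sum * bn_mul_c + bn_add_c;
        out = requantize_u8(scaled, out_q, minval, maxval);
    }

The QASYMM8_SIGNED kernel that follows is structurally identical; it widens with sshll instead of ushll and saturates to the int8 range [-128, 127].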
diff --git a/src/cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp
new file mode 100644
index 0000000000..3bde83cc26
--- /dev/null
+++ b/src/cpu/kernels/addmuladd/generic/neon/qasymm8_signed.cpp
@@ -0,0 +1,834 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/QuantizationInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+#ifdef __aarch64__
+namespace
+{
+void a64_add_bn_clamp_direct_s8_fp32_2x16(
+ int8_t *out, size_t out_stride,
+ int8_t *out_direct, size_t out_direct_stride,
+ const int8_t *in0, size_t in0_stride,
+ const int8_t *in1, size_t in1_stride,
+ const float *bn_mul,
+ const float *bn_add,
+ const int8_t minval,
+ const int8_t maxval,
+ int32_t out_zeropt, float out_scale,
+ int32_t out_direct_zeropt, float out_direct_scale,
+ int32_t in0_zeropt, float in0_scale,
+ int32_t in1_zeropt, float in1_scale,
+ size_t width, size_t height)
+{
+ float scales[4] = { in0_scale, in1_scale, 1.0f / out_scale, 1.0f / out_direct_scale };
+ struct KernelArgs
+ {
+ const float *scales;
+ int32_t in0_zeropt;
+ int32_t in1_zeropt;
+ int32_t out_zeropt;
+ int32_t out_direct_zeropt;
+ int32_t minval;
+ int32_t maxval;
+ } ka;
+ ka.scales = scales;
+ ka.in0_zeropt = in0_zeropt;
+ ka.in1_zeropt = in1_zeropt;
+ ka.out_zeropt = out_zeropt;
+ ka.out_direct_zeropt = out_direct_zeropt;
+ ka.minval = minval;
+ ka.maxval = maxval;
+
+ __asm__ __volatile__(
+ "ldr x20, [%x[args_ptr], %[offsetof_scales]]\n"
+ "ld1 { v0.4s }, [x20]\n"
+ "cmp %x[width], #0x10\n"
+ "blt 5f\n"
+ "1:" // Column loop
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x23, %x[height]\n"
+ "mov x12, %x[in0]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x11, %x[in1]\n"
+ "mov x10, %x[out]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x9, %x[out_direct]\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "2:" // Row loop
+ "mov x28, x12\n"
+ "ldr d4, [x28, #0x0]\n"
+ "ldr d3, [x28, #0x8]\n"
+ "add x21, x28, %x[in0_stride]\n"
+ "mov x27, x11\n"
+ "ldr d13, [x27, #0x0]\n"
+ "ldr d12, [x27, #0x8]\n"
+ "cmp x23, #0x2\n"
+ "add x12, x21, %x[in0_stride]\n"
+ "csel x21, x21, x28, GE\n"
+ "ldr d2, [x21, #0x0]\n"
+ "ldr d11, [x21, #0x8]\n"
+ "add x20, x27, %x[in1_stride]\n"
+ "add x11, x20, %x[in1_stride]\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_in0_zeropt]]\n"
+ "sshll v4.8h, v4.8b, #0x0\n"
+ "csel x20, x20, x27, GE\n"
+ "ldr d10, [x20, #0x0]\n"
+ "ldr d9, [x20, #0x8]\n"
+ "sshll v3.8h, v3.8b, #0x0\n"
+ "sshll v2.8h, v2.8b, #0x0\n"
+ "sshll v11.8h, v11.8b, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_in1_zeropt]]\n"
+ "mov x26, x10\n"
+ "dup v16.8h, w21\n"
+ "sshll v13.8h, v13.8b, #0x0\n"
+ "mov x25, x9\n"
+ "add x24, x26, %x[out_stride]\n"
+ "sshll v12.8h, v12.8b, #0x0\n"
+ "sshll v10.8h, v10.8b, #0x0\n"
+ "add x22, x25, %x[out_direct_stride]\n"
+ "add x10, x24, %x[out_stride]\n"
+ "sshll v9.8h, v9.8b, #0x0\n"
+ "ssubl v1.4s, v4.4h, v16.4h\n"
+ "add x9, x22, %x[out_direct_stride]\n"
+ "csel x24, x24, x26, GE\n"
+ "ssubl2 v4.4s, v4.8h, v16.8h\n"
+ "ssubl v23.4s, v3.4h, v16.4h\n"
+ "csel x22, x22, x25, GE\n"
+ "ssubl2 v3.4s, v3.8h, v16.8h\n"
+ "ssubl v22.4s, v2.4h, v16.4h\n"
+ "ssubl2 v2.4s, v2.8h, v16.8h\n"
+ "ssubl v21.4s, v11.4h, v16.4h\n"
+ "ssubl2 v11.4s, v11.8h, v16.8h\n"
+ "dup v20.8h, w20\n"
+ "ssubl v19.4s, v13.4h, v20.4h\n"
+ "ssubl2 v13.4s, v13.8h, v20.8h\n"
+ "ssubl v18.4s, v12.4h, v20.4h\n"
+ "ssubl2 v12.4s, v12.8h, v20.8h\n"
+ "ssubl v17.4s, v10.4h, v20.4h\n"
+ "ssubl2 v10.4s, v10.8h, v20.8h\n"
+ "ssubl v16.4s, v9.4h, v20.4h\n"
+ "ssubl2 v9.4s, v9.8h, v20.8h\n"
+ "scvtf v8.4s, v1.4s\n"
+ "scvtf v7.4s, v4.4s\n"
+ "scvtf v6.4s, v23.4s\n"
+ "scvtf v5.4s, v3.4s\n"
+ "scvtf v4.4s, v22.4s\n"
+ "scvtf v3.4s, v2.4s\n"
+ "scvtf v2.4s, v21.4s\n"
+ "scvtf v1.4s, v11.4s\n"
+ "scvtf v19.4s, v19.4s\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "fmla v8.4s, v19.4s, v0.s[1]\n"
+ "scvtf v13.4s, v13.4s\n"
+ "fmul v7.4s, v7.4s, v0.s[0]\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "scvtf v18.4s, v18.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "fmla v6.4s, v18.4s, v0.s[1]\n"
+ "scvtf v12.4s, v12.4s\n"
+ "fmul v5.4s, v5.4s, v0.s[0]\n"
+ "fmla v5.4s, v12.4s, v0.s[1]\n"
+ "scvtf v17.4s, v17.4s\n"
+ "fmul v4.4s, v4.4s, v0.s[0]\n"
+ "fmla v4.4s, v17.4s, v0.s[1]\n"
+ "scvtf v10.4s, v10.4s\n"
+ "fmul v3.4s, v3.4s, v0.s[0]\n"
+ "fmla v3.4s, v10.4s, v0.s[1]\n"
+ "scvtf v16.4s, v16.4s\n"
+ "fmul v2.4s, v2.4s, v0.s[0]\n"
+ "fmla v2.4s, v16.4s, v0.s[1]\n"
+ "scvtf v9.4s, v9.4s\n"
+ "fmul v1.4s, v1.4s, v0.s[0]\n"
+ "fmla v1.4s, v9.4s, v0.s[1]\n"
+ "cbz %x[out_direct], 3f\n"
+ "fmul v23.4s, v8.4s, v0.s[3]\n"
+ "fmul v22.4s, v7.4s, v0.s[3]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_out_direct_zeropt]]\n"
+ "fmul v21.4s, v6.4s, v0.s[3]\n"
+ "fmul v20.4s, v5.4s, v0.s[3]\n"
+ "fmul v17.4s, v4.4s, v0.s[3]\n"
+ "fmul v19.4s, v3.4s, v0.s[3]\n"
+ "fmul v16.4s, v2.4s, v0.s[3]\n"
+ "fmul v18.4s, v1.4s, v0.s[3]\n"
+ "fcvtas v23.4s, v23.4s\n"
+ "fcvtas v22.4s, v22.4s\n"
+ "fcvtas v21.4s, v21.4s\n"
+ "fcvtas v20.4s, v20.4s\n"
+ "fcvtas v17.4s, v17.4s\n"
+ "fcvtas v19.4s, v19.4s\n"
+ "fcvtas v16.4s, v16.4s\n"
+ "fcvtas v18.4s, v18.4s\n"
+ "uzp1 v22.8h, v23.8h, v22.8h\n"
+ "uzp1 v20.8h, v21.8h, v20.8h\n"
+ "uzp1 v19.8h, v17.8h, v19.8h\n"
+ "uzp1 v18.8h, v16.8h, v18.8h\n"
+ "dup v16.8h, w20\n"
+ "add v22.8h, v22.8h, v16.8h\n"
+ "add v20.8h, v20.8h, v16.8h\n"
+ "add v19.8h, v19.8h, v16.8h\n"
+ "add v18.8h, v18.8h, v16.8h\n"
+ "movi v17.8h, #0x7f\n"
+ "mvni v16.8h, #0x7f\n"
+ "smin v22.8h, v22.8h, v17.8h\n"
+ "smin v20.8h, v20.8h, v17.8h\n"
+ "smin v19.8h, v19.8h, v17.8h\n"
+ "smin v18.8h, v18.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v16.8h\n"
+ "smax v20.8h, v20.8h, v16.8h\n"
+ "smax v19.8h, v19.8h, v16.8h\n"
+ "smax v18.8h, v18.8h, v16.8h\n"
+ "xtn v22.8b, v22.8h\n"
+ "str d22, [x25, #0x0]\n"
+ "xtn v20.8b, v20.8h\n"
+ "xtn v19.8b, v19.8h\n"
+ "str d20, [x25, #0x8]\n"
+ "xtn v18.8b, v18.8h\n"
+ "str d19, [x22, #0x0]\n"
+ "str d18, [x22, #0x8]\n"
+ "3:" // Main loop: No direct output
+ "mov v19.16b, v28.16b\n"
+ "mov v13.16b, v29.16b\n"
+ "fmla v19.4s, v8.4s, v24.4s\n"
+ "ldr w22, [%x[args_ptr], %[offsetof_out_zeropt]]\n"
+ "mov v18.16b, v30.16b\n"
+ "mov v12.16b, v31.16b\n"
+ "fmla v13.4s, v7.4s, v25.4s\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_maxval]]\n"
+ "mov v17.16b, v28.16b\n"
+ "mov v10.16b, v29.16b\n"
+ "fmla v18.4s, v6.4s, v26.4s\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_minval]]\n"
+ "mov v16.16b, v30.16b\n"
+ "mov v9.16b, v31.16b\n"
+ "fmla v12.4s, v5.4s, v27.4s\n"
+ "subs x23, x23, #0x2\n"
+ "fmla v17.4s, v4.4s, v24.4s\n"
+ "fmla v10.4s, v3.4s, v25.4s\n"
+ "fmul v8.4s, v19.4s, v0.s[2]\n"
+ "fmla v16.4s, v2.4s, v26.4s\n"
+ "fmla v9.4s, v1.4s, v27.4s\n"
+ "fmul v7.4s, v13.4s, v0.s[2]\n"
+ "fmul v6.4s, v18.4s, v0.s[2]\n"
+ "fmul v5.4s, v12.4s, v0.s[2]\n"
+ "fmul v4.4s, v17.4s, v0.s[2]\n"
+ "fmul v3.4s, v10.4s, v0.s[2]\n"
+ "fmul v2.4s, v16.4s, v0.s[2]\n"
+ "fmul v1.4s, v9.4s, v0.s[2]\n"
+ "fcvtas v8.4s, v8.4s\n"
+ "fcvtas v7.4s, v7.4s\n"
+ "fcvtas v6.4s, v6.4s\n"
+ "fcvtas v5.4s, v5.4s\n"
+ "fcvtas v4.4s, v4.4s\n"
+ "fcvtas v3.4s, v3.4s\n"
+ "fcvtas v2.4s, v2.4s\n"
+ "fcvtas v1.4s, v1.4s\n"
+ "uzp1 v7.8h, v8.8h, v7.8h\n"
+ "uzp1 v5.8h, v6.8h, v5.8h\n"
+ "uzp1 v3.8h, v4.8h, v3.8h\n"
+ "uzp1 v1.8h, v2.8h, v1.8h\n"
+ "dup v16.8h, w22\n"
+ "add v7.8h, v7.8h, v16.8h\n"
+ "add v5.8h, v5.8h, v16.8h\n"
+ "add v3.8h, v3.8h, v16.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w21\n"
+ "smin v7.8h, v7.8h, v16.8h\n"
+ "smin v5.8h, v5.8h, v16.8h\n"
+ "smin v3.8h, v3.8h, v16.8h\n"
+ "smin v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w20\n"
+ "smax v7.8h, v7.8h, v16.8h\n"
+ "smax v5.8h, v5.8h, v16.8h\n"
+ "smax v3.8h, v3.8h, v16.8h\n"
+ "smax v1.8h, v1.8h, v16.8h\n"
+ "xtn v7.8b, v7.8h\n"
+ "str d7, [x26, #0x0]\n"
+ "xtn v5.8b, v5.8h\n"
+ "xtn v3.8b, v3.8h\n"
+ "str d5, [x26, #0x8]\n"
+ "xtn v1.8b, v1.8h\n"
+ "str d3, [x24, #0x0]\n"
+ "str d1, [x24, #0x8]\n"
+ "bgt 2b\n"
+ "add %x[in0], %x[in0], #0x10\n"
+ "add %x[in1], %x[in1], #0x10\n"
+ "add %x[out], %x[out], #0x10\n"
+ "cbz %x[out_direct], 4f\n"
+ "add %x[out_direct], %x[out_direct], #0x10\n"
+ "4:" // No direct pointer update
+ "sub %x[width], %x[width], #0x10\n"
+ "cmp %x[width], #0x10\n"
+ "bge 1b\n"
+ "cbz %x[width], 32f\n"
+ "5:" // main loop skip
+ "ldr q24, [%x[bn_mul], #0x0]\n"
+ "ldr q25, [%x[bn_mul], #0x10]\n"
+ "mov x23, %x[height]\n"
+ "mov x12, %x[in0]\n"
+ "ldr q26, [%x[bn_mul], #0x20]\n"
+ "ldr q27, [%x[bn_mul], #0x30]\n"
+ "mov x11, %x[in1]\n"
+ "mov x10, %x[out]\n"
+ "ldr q28, [%x[bn_add], #0x0]\n"
+ "ldr q29, [%x[bn_add], #0x10]\n"
+ "mov x9, %x[out_direct]\n"
+ "add %x[bn_mul], %x[bn_mul], #0x40\n"
+ "ldr q30, [%x[bn_add], #0x20]\n"
+ "ldr q31, [%x[bn_add], #0x30]\n"
+ "add %x[bn_add], %x[bn_add], #0x40\n"
+ "6:" // tail loop: Row loop
+ "mov x28, x12\n"
+ "mov x27, x11\n"
+ "mov x26, x10\n"
+ "mov x25, x9\n"
+ "add x21, x28, %x[in0_stride]\n"
+ "add x20, x27, %x[in1_stride]\n"
+ "add x24, x26, %x[out_stride]\n"
+ "add x22, x25, %x[out_direct_stride]\n"
+ "cmp x23, #0x2\n"
+ "add x12, x21, %x[in0_stride]\n"
+ "add x11, x20, %x[in1_stride]\n"
+ "add x10, x24, %x[out_stride]\n"
+ "add x9, x22, %x[out_direct_stride]\n"
+ "csel x21, x21, x28, GE\n"
+ "csel x20, x20, x27, GE\n"
+ "csel x24, x24, x26, GE\n"
+ "csel x22, x22, x25, GE\n"
+ "tbz %x[width], #3, 10f\n"
+ "ldr d4, [x28, #0x0]\n"
+ "ldr d13, [x27, #0x0]\n"
+ "add x28, x28, #0x8\n"
+ "add x27, x27, #0x8\n"
+ "ldr d2, [x21, #0x0]\n"
+ "ldr d10, [x20, #0x0]\n"
+ "add x21, x21, #0x8\n"
+ "add x20, x20, #0x8\n"
+ "tbz %x[width], #2, 8f\n"
+ "ldr s3, [x28], #0x4\n"
+ "ldr s12, [x27], #0x4\n"
+ "ldr s11, [x21], #0x4\n"
+ "ldr s9, [x20], #0x4\n"
+ "tbz %x[width], #1, 7f\n"
+ "ld1 { v3.h }[2], [x28], #0x2\n"
+ "ld1 { v12.h }[2], [x27], #0x2\n"
+ "ld1 { v11.h }[2], [x21], #0x2\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v3.b }[6], [x28], #0x1\n"
+ "ld1 { v12.b }[6], [x27], #0x1\n"
+ "ld1 { v11.b }[6], [x21], #0x1\n"
+ "ld1 { v9.b }[6], [x20], #0x1\n"
+ "b 14f\n"
+ "7:" // tail loop: unique 1: partial_0_12
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v3.b }[4], [x28], #0x1\n"
+ "ld1 { v12.b }[4], [x27], #0x1\n"
+ "ld1 { v11.b }[4], [x21], #0x1\n"
+ "ld1 { v9.b }[4], [x20], #0x1\n"
+ "b 14f\n"
+ "8:" // tail loop: unique 1: partial_1_8
+ "tbz %x[width], #1, 9f\n"
+ "ldr h3, [x28], #0x2\n"
+ "ldr h12, [x27], #0x2\n"
+ "ldr h11, [x21], #0x2\n"
+ "ldr h9, [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v3.b }[2], [x28], #0x1\n"
+ "ld1 { v12.b }[2], [x27], #0x1\n"
+ "ld1 { v11.b }[2], [x21], #0x1\n"
+ "ld1 { v9.b }[2], [x20], #0x1\n"
+ "b 14f\n"
+ "9:" // tail loop: unique 1: partial_0_8
+ "tbz %x[width], #0, 14f\n"
+ "ldr b3, [x28], #0x1\n"
+ "ldr b12, [x27], #0x1\n"
+ "ldr b11, [x21], #0x1\n"
+ "ldr b9, [x20], #0x1\n"
+ "b 14f\n"
+ "10:" // tail loop: unique 1: partial_2_0
+ "tbz %x[width], #2, 12f\n"
+ "ldr s4, [x28], #0x4\n"
+ "ldr s13, [x27], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr s10, [x20], #0x4\n"
+ "tbz %x[width], #1, 11f\n"
+ "ld1 { v4.h }[2], [x28], #0x2\n"
+ "ld1 { v13.h }[2], [x27], #0x2\n"
+ "ld1 { v2.h }[2], [x21], #0x2\n"
+ "ld1 { v10.h }[2], [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v4.b }[6], [x28], #0x1\n"
+ "ld1 { v13.b }[6], [x27], #0x1\n"
+ "ld1 { v2.b }[6], [x21], #0x1\n"
+ "ld1 { v10.b }[6], [x20], #0x1\n"
+ "b 14f\n"
+ "11:" // tail loop: unique 1: partial_0_4
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v4.b }[4], [x28], #0x1\n"
+ "ld1 { v13.b }[4], [x27], #0x1\n"
+ "ld1 { v2.b }[4], [x21], #0x1\n"
+ "ld1 { v10.b }[4], [x20], #0x1\n"
+ "b 14f\n"
+ "12:" // tail loop: unique 1: partial_1_0
+ "tbz %x[width], #1, 13f\n"
+ "ldr h4, [x28], #0x2\n"
+ "ldr h13, [x27], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
+ "ldr h10, [x20], #0x2\n"
+ "tbz %x[width], #0, 14f\n"
+ "ld1 { v4.b }[2], [x28], #0x1\n"
+ "ld1 { v13.b }[2], [x27], #0x1\n"
+ "ld1 { v2.b }[2], [x21], #0x1\n"
+ "ld1 { v10.b }[2], [x20], #0x1\n"
+ "b 14f\n"
+ "13:" // tail loop: unique 1: partial_0_0
+ "ldr b4, [x28], #0x1\n"
+ "ldr b13, [x27], #0x1\n"
+ "ldr b2, [x21], #0x1\n"
+ "ldr b10, [x20], #0x1\n"
+ "14:" // tail loop: unique 1: Done
+ "ldr w21, [%x[args_ptr], %[offsetof_in0_zeropt]]\n"
+ "sshll v4.8h, v4.8b, #0x0\n"
+ "sshll v3.8h, v3.8b, #0x0\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_in1_zeropt]]\n"
+ "sshll v2.8h, v2.8b, #0x0\n"
+ "sshll v11.8h, v11.8b, #0x0\n"
+ "dup v16.8h, w21\n"
+ "sshll v13.8h, v13.8b, #0x0\n"
+ "sshll v12.8h, v12.8b, #0x0\n"
+ "sshll v10.8h, v10.8b, #0x0\n"
+ "sshll v9.8h, v9.8b, #0x0\n"
+ "ssubl v1.4s, v4.4h, v16.4h\n"
+ "ssubl2 v4.4s, v4.8h, v16.8h\n"
+ "ssubl v23.4s, v3.4h, v16.4h\n"
+ "ssubl2 v3.4s, v3.8h, v16.8h\n"
+ "ssubl v22.4s, v2.4h, v16.4h\n"
+ "ssubl2 v2.4s, v2.8h, v16.8h\n"
+ "ssubl v21.4s, v11.4h, v16.4h\n"
+ "ssubl2 v11.4s, v11.8h, v16.8h\n"
+ "dup v20.8h, w20\n"
+ "ssubl v19.4s, v13.4h, v20.4h\n"
+ "ssubl2 v13.4s, v13.8h, v20.8h\n"
+ "ssubl v18.4s, v12.4h, v20.4h\n"
+ "ssubl2 v12.4s, v12.8h, v20.8h\n"
+ "ssubl v17.4s, v10.4h, v20.4h\n"
+ "ssubl2 v10.4s, v10.8h, v20.8h\n"
+ "ssubl v16.4s, v9.4h, v20.4h\n"
+ "ssubl2 v9.4s, v9.8h, v20.8h\n"
+ "scvtf v8.4s, v1.4s\n"
+ "scvtf v7.4s, v4.4s\n"
+ "scvtf v6.4s, v23.4s\n"
+ "scvtf v5.4s, v3.4s\n"
+ "scvtf v4.4s, v22.4s\n"
+ "scvtf v3.4s, v2.4s\n"
+ "scvtf v2.4s, v21.4s\n"
+ "scvtf v1.4s, v11.4s\n"
+ "scvtf v19.4s, v19.4s\n"
+ "fmul v8.4s, v8.4s, v0.s[0]\n"
+ "fmla v8.4s, v19.4s, v0.s[1]\n"
+ "scvtf v13.4s, v13.4s\n"
+ "fmul v7.4s, v7.4s, v0.s[0]\n"
+ "fmla v7.4s, v13.4s, v0.s[1]\n"
+ "scvtf v18.4s, v18.4s\n"
+ "fmul v6.4s, v6.4s, v0.s[0]\n"
+ "fmla v6.4s, v18.4s, v0.s[1]\n"
+ "scvtf v12.4s, v12.4s\n"
+ "fmul v5.4s, v5.4s, v0.s[0]\n"
+ "fmla v5.4s, v12.4s, v0.s[1]\n"
+ "scvtf v17.4s, v17.4s\n"
+ "fmul v4.4s, v4.4s, v0.s[0]\n"
+ "fmla v4.4s, v17.4s, v0.s[1]\n"
+ "scvtf v10.4s, v10.4s\n"
+ "fmul v3.4s, v3.4s, v0.s[0]\n"
+ "fmla v3.4s, v10.4s, v0.s[1]\n"
+ "scvtf v16.4s, v16.4s\n"
+ "fmul v2.4s, v2.4s, v0.s[0]\n"
+ "fmla v2.4s, v16.4s, v0.s[1]\n"
+ "scvtf v9.4s, v9.4s\n"
+ "fmul v1.4s, v1.4s, v0.s[0]\n"
+ "fmla v1.4s, v9.4s, v0.s[1]\n"
+ "cbz %x[out_direct], 23f\n"
+ "fmul v23.4s, v8.4s, v0.s[3]\n"
+ "fmul v22.4s, v7.4s, v0.s[3]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_out_direct_zeropt]]\n"
+ "fmul v21.4s, v6.4s, v0.s[3]\n"
+ "fmul v20.4s, v5.4s, v0.s[3]\n"
+ "fmul v17.4s, v4.4s, v0.s[3]\n"
+ "fmul v19.4s, v3.4s, v0.s[3]\n"
+ "fmul v16.4s, v2.4s, v0.s[3]\n"
+ "fmul v18.4s, v1.4s, v0.s[3]\n"
+ "fcvtas v23.4s, v23.4s\n"
+ "fcvtas v22.4s, v22.4s\n"
+ "fcvtas v21.4s, v21.4s\n"
+ "fcvtas v20.4s, v20.4s\n"
+ "fcvtas v17.4s, v17.4s\n"
+ "fcvtas v19.4s, v19.4s\n"
+ "fcvtas v16.4s, v16.4s\n"
+ "fcvtas v18.4s, v18.4s\n"
+ "uzp1 v22.8h, v23.8h, v22.8h\n"
+ "uzp1 v20.8h, v21.8h, v20.8h\n"
+ "uzp1 v19.8h, v17.8h, v19.8h\n"
+ "uzp1 v18.8h, v16.8h, v18.8h\n"
+ "dup v16.8h, w20\n"
+ "add v22.8h, v22.8h, v16.8h\n"
+ "add v20.8h, v20.8h, v16.8h\n"
+ "add v19.8h, v19.8h, v16.8h\n"
+ "add v18.8h, v18.8h, v16.8h\n"
+ "movi v17.8h, #0x7f\n"
+ "mvni v16.8h, #0x7f\n"
+ "smin v22.8h, v22.8h, v17.8h\n"
+ "smin v20.8h, v20.8h, v17.8h\n"
+ "smin v19.8h, v19.8h, v17.8h\n"
+ "smin v18.8h, v18.8h, v17.8h\n"
+ "smax v22.8h, v22.8h, v16.8h\n"
+ "smax v20.8h, v20.8h, v16.8h\n"
+ "smax v19.8h, v19.8h, v16.8h\n"
+ "smax v18.8h, v18.8h, v16.8h\n"
+ "xtn v22.8b, v22.8h\n"
+ "xtn v20.8b, v20.8h\n"
+ "xtn v19.8b, v19.8h\n"
+ "xtn v18.8b, v18.8h\n"
+ "tbz %x[width], #3, 18f\n"
+ "str d22, [x25, #0x0]\n"
+ "add x25, x25, #0x8\n"
+ "str d19, [x22, #0x0]\n"
+ "add x22, x22, #0x8\n"
+ "tbz %x[width], #2, 16f\n"
+ "str s20, [x25], #0x4\n"
+ "str s18, [x22], #0x4\n"
+ "tbz %x[width], #1, 15f\n"
+ "st1 { v20.h }[2], [x25], #0x2\n"
+ "st1 { v18.h }[2], [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v20.b }[6], [x25], #0x1\n"
+ "st1 { v18.b }[6], [x22], #0x1\n"
+ "b 22f\n"
+ "15:" // tail loop: Main loop: unique 2: partial_0_12
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v20.b }[4], [x25], #0x1\n"
+ "st1 { v18.b }[4], [x22], #0x1\n"
+ "b 22f\n"
+ "16:" // tail loop: Main loop: unique 2: partial_1_8
+ "tbz %x[width], #1, 17f\n"
+ "str h20, [x25], #0x2\n"
+ "str h18, [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v20.b }[2], [x25], #0x1\n"
+ "st1 { v18.b }[2], [x22], #0x1\n"
+ "b 22f\n"
+ "17:" // tail loop: Main loop: unique 2: partial_0_8
+ "tbz %x[width], #0, 22f\n"
+ "str b20, [x25], #0x1\n"
+ "str b18, [x22], #0x1\n"
+ "b 22f\n"
+ "18:" // tail loop: Main loop: unique 2: partial_2_0
+ "tbz %x[width], #2, 20f\n"
+ "str s22, [x25], #0x4\n"
+ "str s19, [x22], #0x4\n"
+ "tbz %x[width], #1, 19f\n"
+ "st1 { v22.h }[2], [x25], #0x2\n"
+ "st1 { v19.h }[2], [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v22.b }[6], [x25], #0x1\n"
+ "st1 { v19.b }[6], [x22], #0x1\n"
+ "b 22f\n"
+ "19:" // tail loop: Main loop: unique 2: partial_0_4
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v22.b }[4], [x25], #0x1\n"
+ "st1 { v19.b }[4], [x22], #0x1\n"
+ "b 22f\n"
+ "20:" // tail loop: Main loop: unique 2: partial_1_0
+ "tbz %x[width], #1, 21f\n"
+ "str h22, [x25], #0x2\n"
+ "str h19, [x22], #0x2\n"
+ "tbz %x[width], #0, 22f\n"
+ "st1 { v22.b }[2], [x25], #0x1\n"
+ "st1 { v19.b }[2], [x22], #0x1\n"
+ "b 22f\n"
+ "21:" // tail loop: Main loop: unique 2: partial_0_0
+ "str b22, [x25], #0x1\n"
+ "str b19, [x22], #0x1\n"
+ "22:" // tail loop: Main loop: unique 2: Done
+ "23:" // tail loop: Main loop: No direct output
+ "mov v19.16b, v28.16b\n"
+ "mov v13.16b, v29.16b\n"
+ "fmla v19.4s, v8.4s, v24.4s\n"
+ "ldr w22, [%x[args_ptr], %[offsetof_out_zeropt]]\n"
+ "mov v18.16b, v30.16b\n"
+ "mov v12.16b, v31.16b\n"
+ "fmla v13.4s, v7.4s, v25.4s\n"
+ "ldr w21, [%x[args_ptr], %[offsetof_maxval]]\n"
+ "mov v17.16b, v28.16b\n"
+ "mov v10.16b, v29.16b\n"
+ "fmla v18.4s, v6.4s, v26.4s\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_minval]]\n"
+ "mov v16.16b, v30.16b\n"
+ "mov v9.16b, v31.16b\n"
+ "fmla v12.4s, v5.4s, v27.4s\n"
+ "fmla v17.4s, v4.4s, v24.4s\n"
+ "fmla v10.4s, v3.4s, v25.4s\n"
+ "fmul v8.4s, v19.4s, v0.s[2]\n"
+ "fmla v16.4s, v2.4s, v26.4s\n"
+ "fmla v9.4s, v1.4s, v27.4s\n"
+ "fmul v7.4s, v13.4s, v0.s[2]\n"
+ "fmul v6.4s, v18.4s, v0.s[2]\n"
+ "fmul v5.4s, v12.4s, v0.s[2]\n"
+ "fmul v4.4s, v17.4s, v0.s[2]\n"
+ "fmul v3.4s, v10.4s, v0.s[2]\n"
+ "fmul v2.4s, v16.4s, v0.s[2]\n"
+ "fmul v1.4s, v9.4s, v0.s[2]\n"
+ "fcvtas v8.4s, v8.4s\n"
+ "fcvtas v7.4s, v7.4s\n"
+ "fcvtas v6.4s, v6.4s\n"
+ "fcvtas v5.4s, v5.4s\n"
+ "fcvtas v4.4s, v4.4s\n"
+ "fcvtas v3.4s, v3.4s\n"
+ "fcvtas v2.4s, v2.4s\n"
+ "fcvtas v1.4s, v1.4s\n"
+ "uzp1 v7.8h, v8.8h, v7.8h\n"
+ "uzp1 v5.8h, v6.8h, v5.8h\n"
+ "uzp1 v3.8h, v4.8h, v3.8h\n"
+ "uzp1 v1.8h, v2.8h, v1.8h\n"
+ "dup v16.8h, w22\n"
+ "add v7.8h, v7.8h, v16.8h\n"
+ "add v5.8h, v5.8h, v16.8h\n"
+ "add v3.8h, v3.8h, v16.8h\n"
+ "add v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w21\n"
+ "smin v7.8h, v7.8h, v16.8h\n"
+ "smin v5.8h, v5.8h, v16.8h\n"
+ "smin v3.8h, v3.8h, v16.8h\n"
+ "smin v1.8h, v1.8h, v16.8h\n"
+ "dup v16.8h, w20\n"
+ "smax v7.8h, v7.8h, v16.8h\n"
+ "smax v5.8h, v5.8h, v16.8h\n"
+ "smax v3.8h, v3.8h, v16.8h\n"
+ "smax v1.8h, v1.8h, v16.8h\n"
+ "xtn v7.8b, v7.8h\n"
+ "xtn v5.8b, v5.8h\n"
+ "xtn v3.8b, v3.8h\n"
+ "xtn v1.8b, v1.8h\n"
+ "tbz %x[width], #3, 27f\n"
+ "str d7, [x26, #0x0]\n"
+ "add x26, x26, #0x8\n"
+ "str d3, [x24, #0x0]\n"
+ "add x24, x24, #0x8\n"
+ "tbz %x[width], #2, 25f\n"
+ "str s5, [x26], #0x4\n"
+ "str s1, [x24], #0x4\n"
+ "tbz %x[width], #1, 24f\n"
+ "st1 { v5.h }[2], [x26], #0x2\n"
+ "st1 { v1.h }[2], [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v5.b }[6], [x26], #0x1\n"
+ "st1 { v1.b }[6], [x24], #0x1\n"
+ "b 31f\n"
+ "24:" // tail loop: unique 3: partial_0_12
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v5.b }[4], [x26], #0x1\n"
+ "st1 { v1.b }[4], [x24], #0x1\n"
+ "b 31f\n"
+ "25:" // tail loop: unique 3: partial_1_8
+ "tbz %x[width], #1, 26f\n"
+ "str h5, [x26], #0x2\n"
+ "str h1, [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v5.b }[2], [x26], #0x1\n"
+ "st1 { v1.b }[2], [x24], #0x1\n"
+ "b 31f\n"
+ "26:" // tail loop: unique 3: partial_0_8
+ "tbz %x[width], #0, 31f\n"
+ "str b5, [x26], #0x1\n"
+ "str b1, [x24], #0x1\n"
+ "b 31f\n"
+ "27:" // tail loop: unique 3: partial_2_0
+ "tbz %x[width], #2, 29f\n"
+ "str s7, [x26], #0x4\n"
+ "str s3, [x24], #0x4\n"
+ "tbz %x[width], #1, 28f\n"
+ "st1 { v7.h }[2], [x26], #0x2\n"
+ "st1 { v3.h }[2], [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v7.b }[6], [x26], #0x1\n"
+ "st1 { v3.b }[6], [x24], #0x1\n"
+ "b 31f\n"
+ "28:" // tail loop: unique 3: partial_0_4
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v7.b }[4], [x26], #0x1\n"
+ "st1 { v3.b }[4], [x24], #0x1\n"
+ "b 31f\n"
+ "29:" // tail loop: unique 3: partial_1_0
+ "tbz %x[width], #1, 30f\n"
+ "str h7, [x26], #0x2\n"
+ "str h3, [x24], #0x2\n"
+ "tbz %x[width], #0, 31f\n"
+ "st1 { v7.b }[2], [x26], #0x1\n"
+ "st1 { v3.b }[2], [x24], #0x1\n"
+ "b 31f\n"
+ "30:" // tail loop: unique 3: partial_0_0
+ "str b7, [x26], #0x1\n"
+ "str b3, [x24], #0x1\n"
+ "31:" // tail loop: unique 3: Done
+ "subs x23, x23, #0x2\n"
+ "bgt 6b\n"
+ "32:" // odd columns skip
+ : [bn_add] "+&r"(bn_add), [bn_mul] "+&r"(bn_mul), [in0] "+&r"(in0), [in1] "+&r"(in1), [out] "+&r"(out), [out_direct] "+&r"(out_direct), [width] "+&r"(width)
+ : [args_ptr] "r"(&ka), [height] "r"(height), [in0_stride] "r"(in0_stride), [in1_stride] "r"(in1_stride), [offsetof_in0_zeropt] "I"(offsetof(KernelArgs, in0_zeropt)), [offsetof_in1_zeropt] "I"(offsetof(KernelArgs, in1_zeropt)), [offsetof_maxval] "I"(offsetof(KernelArgs, maxval)), [offsetof_minval] "I"(offsetof(KernelArgs, minval)), [offsetof_out_direct_zeropt] "I"(offsetof(KernelArgs, out_direct_zeropt)), [offsetof_out_zeropt] "I"(offsetof(KernelArgs, out_zeropt)), [offsetof_scales] "I"(offsetof(KernelArgs, scales)), [out_direct_stride] "r"(out_direct_stride), [out_stride] "r"(out_stride)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
+}
+
+} // namespace
+
+namespace arm_compute
+{
+namespace cpu
+{
+void add_mul_add_s8_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add,
+ ITensor *add_output, ITensor *final_output, ConvertPolicy policy, const ActivationLayerInfo &act_info, const Window &window)
+{
+ ARM_COMPUTE_UNUSED(policy);
+
+ const ITensorInfo *final_output_info = final_output->info();
+ const ITensorInfo *add_output_info = (add_output != nullptr) ? add_output->info() : nullptr;
+ const ITensorInfo *input1_info = input1->info();
+ const ITensorInfo *input2_info = input2->info();
+
+ const size_t out_stride = final_output_info->strides_in_bytes()[1];
+ const size_t out_direct_stride = (add_output != nullptr) ? add_output_info->strides_in_bytes()[1] : 0;
+ const size_t in0_stride = input1_info->strides_in_bytes()[1];
+ const size_t in1_stride = input2_info->strides_in_bytes()[1];
+
+ int8_t minval = std::numeric_limits<int8_t>::lowest();
+ int8_t maxval = std::numeric_limits<int8_t>::max();
+
+ const UniformQuantizationInfo final_output_qinfo = final_output_info->quantization_info().uniform();
+ if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+ {
+ minval = quantize_qasymm8_signed(0.f, final_output_qinfo);
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+ {
+ minval = quantize_qasymm8_signed(0.f, final_output_qinfo);
+ maxval = quantize_qasymm8_signed(act_info.a(), final_output_qinfo);
+ }
+ else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+ {
+ minval = quantize_qasymm8_signed(act_info.b(), final_output_qinfo);
+ maxval = quantize_qasymm8_signed(act_info.a(), final_output_qinfo);
+ }
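+ // Worked example (illustrative, using the QASYMM8_SIGNED quantization from the validation suite):
+ // with BOUNDED_RELU(a = 6) and a final output QuantizationInfo of (scale = 0.06, offset = -48),
+ // minval = quantize_qasymm8_signed(0.f) = round(0 / 0.06) + (-48) = -48 and
+ // maxval = quantize_qasymm8_signed(6.f) = round(6 / 0.06) + (-48) = 52,
+ // i.e. the requantized results are clamped to the int8 interval [-48, 52].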
+
+ const UniformQuantizationInfo in1_qinfo = input1_info->quantization_info().uniform();
+ const UniformQuantizationInfo in2_qinfo = input2_info->quantization_info().uniform();
+ const UniformQuantizationInfo add_output_qinfo = (add_output != nullptr) ? add_output_info->quantization_info().uniform() : UniformQuantizationInfo();
+
+ const int32_t in1_offset = in1_qinfo.offset;
+ const int32_t in2_offset = in2_qinfo.offset;
+ const int32_t out_offset = final_output_qinfo.offset;
+ const int32_t out_direct_offset = add_output_qinfo.offset;
+
+ const float in1_scale = in1_qinfo.scale;
+ const float in2_scale = in2_qinfo.scale;
+ const float out_scale = final_output_qinfo.scale;
+ const float out_direct_scale = add_output_qinfo.scale;
+
+ const float *bn_mul_buffer = reinterpret_cast<float *>(bn_mul->buffer());
+ const float *bn_add_buffer = reinterpret_cast<float *>(bn_add->buffer());
+
+ // Clear X & Y dimensions on execution window as we handle manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win.set(Window::DimY, Window::Dimension(0, 1, 1));
+
+ Iterator in1_it(input1, window);
+ Iterator in2_it(input2, window);
+ Iterator out_it(final_output, window);
+
+ const size_t width = window.num_iterations(0);
+ const size_t height = window.num_iterations(1);
+
+ if(add_output != nullptr)
+ {
+ Iterator add_out_it(add_output, window);
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_s8_fp32_2x16(
+ reinterpret_cast<int8_t *>(out_it.ptr()), out_stride,
+ reinterpret_cast<int8_t *>(add_out_it.ptr()), out_direct_stride,
+ reinterpret_cast<int8_t *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<int8_t *>(in2_it.ptr()), in1_stride,
+ bn_mul_buffer,
+ bn_add_buffer,
+ minval,
+ maxval,
+ out_offset, out_scale,
+ out_direct_offset, out_direct_scale,
+ in1_offset, in1_scale,
+ in2_offset, in2_scale,
+ width, height);
+ },
+ in1_it, in2_it, add_out_it, out_it);
+ }
+ else
+ {
+ execute_window_loop(
+ win, [&](const Coordinates &)
+ {
+ a64_add_bn_clamp_direct_s8_fp32_2x16(
+ reinterpret_cast<int8_t *>(out_it.ptr()), out_stride,
+ nullptr, out_direct_stride,
+ reinterpret_cast<int8_t *>(in1_it.ptr()), in0_stride,
+ reinterpret_cast<int8_t *>(in2_it.ptr()), in1_stride,
+ bn_mul_buffer,
+ bn_add_buffer,
+ minval,
+ maxval,
+ out_offset, out_scale,
+ out_direct_offset, out_direct_scale,
+ in1_offset, in1_scale,
+ in2_offset, in2_scale,
+ width, height);
+ },
+ in1_it, in2_it, out_it);
+ }
+}
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // __aarch64__
diff --git a/src/cpu/kernels/addmuladd/list.h b/src/cpu/kernels/addmuladd/list.h
new file mode 100644
index 0000000000..a7c22c06d8
--- /dev/null
+++ b/src/cpu/kernels/addmuladd/list.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CPU_KERNELS_ADDMULADD_LIST
+#define SRC_CPU_KERNELS_ADDMULADD_LIST
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+#define DECLARE_ADD_MUL_ADD_KERNEL(func_name) \
+ void func_name(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add, \
+ ITensor *add_output, ITensor *final_output, ConvertPolicy policy, const ActivationLayerInfo &act_info, const Window &window)
+
+DECLARE_ADD_MUL_ADD_KERNEL(add_mul_add_fp32_neon);
+DECLARE_ADD_MUL_ADD_KERNEL(add_mul_add_fp16_neon);
+DECLARE_ADD_MUL_ADD_KERNEL(add_mul_add_u8_neon);
+DECLARE_ADD_MUL_ADD_KERNEL(add_mul_add_s8_neon);
+
+#undef DECLARE_ADD_MUL_ADD_KERNEL
+
+} // namespace cpu
+} // namespace arm_compute
+#endif /* SRC_CPU_KERNELS_ADDMULADD_LIST */
diff --git a/src/cpu/operators/CpuAddMulAdd.cpp b/src/cpu/operators/CpuAddMulAdd.cpp
new file mode 100644
index 0000000000..3fd690e3f9
--- /dev/null
+++ b/src/cpu/operators/CpuAddMulAdd.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/cpu/kernels/CpuAddMulAddKernel.h"
+#include "src/cpu/operators/CpuAddMulAdd.h"
+#include "src/cpu/utils/CpuAuxTensorHandler.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuAddMulAdd::configure(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ ITensorInfo *add_output, ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_LOG_PARAMS(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info);
+
+ auto k = std::make_unique<kernels::CpuAddMulAddKernel>();
+
+ const DataType data_type = input1->data_type();
+ if(is_data_type_quantized(data_type))
+ {
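+ // The fused kernel consumes float batchnorm coefficients, so for quantized data types
+ // bn_mul and bn_add are first dequantized into auxiliary float tensors.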
+ _dequantize_bn_mul.configure(bn_mul, &_dequantized_bn_mul);
+ _dequantize_bn_add.configure(bn_add, &_dequantized_bn_add);
+
+ k->configure(input1, input2, &_dequantized_bn_mul, &_dequantized_bn_add, add_output, final_output, policy, act_info);
+
+ // Save auxiliary memory requirements after configuration
+ _aux_mem[DequantizedBnMul] = experimental::MemoryInfo(offset_int_vec(DequantizedBnMul), experimental::MemoryLifetime::Temporary, _dequantized_bn_mul.total_size());
+ _aux_mem[DequantizedBnAdd] = experimental::MemoryInfo(offset_int_vec(DequantizedBnAdd), experimental::MemoryLifetime::Temporary, _dequantized_bn_add.total_size());
+ }
+ else
+ {
+ k->configure(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info);
+ }
+
+ _kernel = std::move(k);
+}
+
+Status CpuAddMulAdd::validate(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ const DataType data_type = input1->data_type();
+ if(is_data_type_quantized(data_type))
+ {
+ TensorInfo dequantized_bn_mul;
+ TensorInfo dequantized_bn_add;
+
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuDequantize::validate(bn_mul, &dequantized_bn_mul));
+ ARM_COMPUTE_RETURN_ON_ERROR(CpuDequantize::validate(bn_add, &dequantized_bn_add));
+
+ return kernels::CpuAddMulAddKernel::validate(input1, input2, &dequantized_bn_mul, &dequantized_bn_add, add_output, final_output, policy, act_info);
+ }
+ else
+ {
+ return kernels::CpuAddMulAddKernel::validate(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info);
+ }
+}
+
+void CpuAddMulAdd::run(ITensorPack &tensors)
+{
+ const DataType data_type = tensors.get_const_tensor(TensorType::ACL_SRC_0)->info()->data_type();
+
+ if(is_data_type_quantized(data_type))
+ {
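+ // Dequantize the batchnorm coefficients into the auxiliary workspace tensors before
+ // scheduling the fused kernel, which expects them in float.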
+ const ITensor *bn_mul = tensors.get_const_tensor(TensorType::ACL_SRC_2);
+ const ITensor *bn_add = tensors.get_const_tensor(TensorType::ACL_SRC_3);
+
+ CpuAuxTensorHandler dequantized_bn_mul_handler(offset_int_vec(DequantizedBnMul), _dequantized_bn_mul, tensors, true);
+ CpuAuxTensorHandler dequantized_bn_add_handler(offset_int_vec(DequantizedBnAdd), _dequantized_bn_add, tensors, true);
+
+ ITensorPack dequantize_mul_pack =
+ {
+ { TensorType::ACL_SRC_0, bn_mul },
+ { TensorType::ACL_DST_0, dequantized_bn_mul_handler.get() }
+ };
+
+ ITensorPack dequantize_add_pack =
+ {
+ { TensorType::ACL_SRC_0, bn_add },
+ { TensorType::ACL_DST_0, dequantized_bn_add_handler.get() }
+ };
+
+ _dequantize_bn_mul.run(dequantize_mul_pack);
+ _dequantize_bn_add.run(dequantize_add_pack);
+
+ ITensorPack add_mul_add_pack =
+ {
+ { TensorType::ACL_SRC_0, tensors.get_const_tensor(TensorType::ACL_SRC_0) },
+ { TensorType::ACL_SRC_1, tensors.get_const_tensor(TensorType::ACL_SRC_1) },
+ { TensorType::ACL_SRC_2, dequantized_bn_mul_handler.get() },
+ { TensorType::ACL_SRC_3, dequantized_bn_add_handler.get() },
+ { TensorType::ACL_DST_0, tensors.get_tensor(TensorType::ACL_DST_0) },
+ { TensorType::ACL_DST_1, tensors.get_tensor(TensorType::ACL_DST_1) },
+ };
+
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), add_mul_add_pack);
+ }
+ else
+ {
+ NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
+ }
+}
+
+experimental::MemoryRequirements CpuAddMulAdd::workspace() const
+{
+ return _aux_mem;
+}
+
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/operators/CpuAddMulAdd.h b/src/cpu/operators/CpuAddMulAdd.h
new file mode 100644
index 0000000000..cf1ece68f1
--- /dev/null
+++ b/src/cpu/operators/CpuAddMulAdd.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CPU_OPERATORS_CPUADDMULADD
+#define SRC_CPU_OPERATORS_CPUADDMULADD
+
+#include "arm_compute/core/TensorInfo.h"
+
+#include "src/cpu/ICpuOperator.h"
+#include "src/cpu/operators/CpuDequantize.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref kernels::CpuAddMulAddKernel */
+class CpuAddMulAdd : public ICpuOperator
+{
+public:
+ /** Initialize the operator's inputs and outputs.
+ *
+ * Similar to @ref NEAddMulAdd::configure()
+ *
+ */
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ ITensorInfo *add_output, ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info);
+ /** Static function to check if given info will lead to a valid configuration
+ *
+ * Similar to @ref CpuAddMulAdd::configure()
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
+ const ITensorInfo *bn_mul, const ITensorInfo *bn_add,
+ const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info);
+
+ // Inherited methods overridden:
+ void run(ITensorPack &tensors) override;
+
+ // We need auxiliary memory to dequantize the batchnorm coefficients
+ experimental::MemoryRequirements workspace() const override;
+
+private:
+ enum AuxTensorIdx
+ {
+ DequantizedBnMul = 0,
+ DequantizedBnAdd,
+ Count
+ };
+
+ CpuDequantize _dequantize_bn_mul{};
+ CpuDequantize _dequantize_bn_add{};
+
+ TensorInfo _dequantized_bn_mul{};
+ TensorInfo _dequantized_bn_add{};
+
+ experimental::MemoryRequirements _aux_mem{ Count };
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* SRC_CPU_OPERATORS_CPUADDMULADD */
diff --git a/src/runtime/NEON/functions/NEAddMulAdd.cpp b/src/runtime/NEON/functions/NEAddMulAdd.cpp
new file mode 100644
index 0000000000..55008de5d6
--- /dev/null
+++ b/src/runtime/NEON/functions/NEAddMulAdd.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/NEON/functions/NEAddMulAdd.h"
+
+#include "src/common/utils/Log.h"
+#include "src/core/helpers/MemoryHelpers.h"
+#include "src/cpu/operators/CpuAddMulAdd.h"
+
+namespace arm_compute
+{
+struct NEAddMulAdd::Impl
+{
+ std::unique_ptr<cpu::CpuAddMulAdd> op{ nullptr };
+ WorkspaceData<Tensor> workspace_tensors{};
+ ITensorPack run_pack{};
+ MemoryGroup memory_group{};
+};
+
+NEAddMulAdd::NEAddMulAdd(std::shared_ptr<IMemoryManager> memory_manager)
+ : _impl(std::make_unique<Impl>())
+{
+ _impl->memory_group = MemoryGroup(std::move(memory_manager));
+}
+
+NEAddMulAdd::~NEAddMulAdd() = default;
+
+void NEAddMulAdd::configure(ITensor *input1, ITensor *input2, ITensor *bn_mul, ITensor *bn_add, ITensor *add_output,
+ ITensor *final_output, const ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ ARM_COMPUTE_LOG_PARAMS(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info);
+
+ _impl->op = std::make_unique<cpu::CpuAddMulAdd>();
+ _impl->op->configure(input1->info(), input2->info(), bn_mul->info(),
+ bn_add->info(), add_output != nullptr ? add_output->info() : nullptr, final_output->info(), policy, act_info);
+
+ _impl->run_pack =
+ {
+ { TensorType::ACL_SRC_0, input1 },
+ { TensorType::ACL_SRC_1, input2 },
+ { TensorType::ACL_SRC_2, bn_mul },
+ { TensorType::ACL_SRC_3, bn_add },
+ { TensorType::ACL_DST_0, add_output },
+ { TensorType::ACL_DST_1, final_output },
+ };
+
+ _impl->workspace_tensors = manage_workspace<Tensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack);
+}
+
+Status NEAddMulAdd::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *bn_mul,
+ const ITensorInfo *bn_add, const ITensorInfo *add_output, const ITensorInfo *final_output,
+ ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+ return cpu::CpuAddMulAdd::validate(input1, input2, bn_mul, bn_add, add_output, final_output, policy, act_info);
+}
+
+void NEAddMulAdd::run()
+{
+ _impl->op->run(_impl->run_pack);
+}
+} // namespace arm_compute
diff --git a/tests/validation/NEON/AddMulAdd.cpp b/tests/validation/NEON/AddMulAdd.cpp
new file mode 100644
index 0000000000..f0aba7833b
--- /dev/null
+++ b/tests/validation/NEON/AddMulAdd.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEAddMulAdd.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/AddMulAddFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f); /**< Tolerance for floating point tests */
+const AbsoluteTolerance<half> tolerance_fp16(half(0.1f)); /**< Tolerance for 16-bit floating point tests */
+constexpr AbsoluteTolerance<float> tolerance_quant(1); /**< Tolerance for quantized tests */
+
+const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
+{
+ ActivationLayerInfo(),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+
+ // Boundaries are aligned with Quantized Data ranges -- DOUBLE check before changing
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, -2.f)
+});
+
+// QASYMM8 test quantizations
+const auto qasymm8_input1_qinfo_set = framework::dataset::make("Input1QInfo", { QuantizationInfo(0.1, 10) }); // Representable Range: [-1, 24.5]
+const auto qasymm8_input2_qinfo_set = framework::dataset::make("Input2QInfo", { QuantizationInfo(0.2, 60) }); // Representable Range: [-12, 39]
+const auto qasymm8_bn_mul_qinfo_set = framework::dataset::make("BnMulInfo", { QuantizationInfo(0.001, 55) }); // Representable Range: [-0.11, 0.2]
+const auto qasymm8_bn_add_qinfo_set = framework::dataset::make("BnAddInfo", { QuantizationInfo(0.02, 20) }); // Representable Range: [-0.4, 4.7]
+
+// Representable Range: [-9.36, 51.84], Expected F32 range: [-13, 63.5], leaving some space for saturation
+const auto qasymm8_add_output_qinfo_set = framework::dataset::make("AddOutputInfo", { QuantizationInfo(0.24, 39) });
+
+// Representable Range: [-4.8, 10.5], Expected FP32 range: [-6.985, 12.7], leaving some space for saturation
+// This range also makes sense with the activation boundaries above, i.e. [-2, 8] for LU_BOUNDED_RELU and [0, 6] for BOUNDED_RELU
+const auto qasymm8_final_output_qinfo_set = framework::dataset::make("FinalOutputInfo", { QuantizationInfo(0.06, 80) });
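+
+// Illustrative derivation of the ranges quoted above, assuming the usual (q - offset) * scale dequantization:
+// QASYMM8 stores q in [0, 255], so QuantizationInfo(0.1, 10) represents [(0 - 10) * 0.1, (255 - 10) * 0.1] = [-1, 24.5],
+// QuantizationInfo(0.2, 60) represents [(0 - 60) * 0.2, (255 - 60) * 0.2] = [-12, 39], and the expected F32 range of
+// their sum is [-1 - 12, 24.5 + 39] = [-13, 63.5], which is where the add-output expectation above comes from.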
+
+// QASYMM8_SIGNED test quantizations
+const auto qasymm8_signed_input1_qinfo_set = framework::dataset::make("Input1QInfo", { QuantizationInfo(0.1, 10) }); // Representable Range: [-13.8, 11.7]
+const auto qasymm8_signed_input2_qinfo_set = framework::dataset::make("Input2QInfo", { QuantizationInfo(0.2, -60) }); // Representable Range: [-13.6, 39.4]
+const auto qasymm8_signed_bn_mul_qinfo_set = framework::dataset::make("BnMulInfo", { QuantizationInfo(0.001, 55) }); // Representable Range: [-0.183, 0.072]
+const auto qasymm8_signed_bn_add_qinfo_set = framework::dataset::make("BnAddInfo", { QuantizationInfo(0.4, -120) }); // Representable Range: [-0.32, 9.08]
+
+// Representable Range: [-21.36, 39.84], Expected F32 range: [-27.4, 51.1], leaving some space for saturation
+const auto qasymm8_signed_add_output_qinfo_set = framework::dataset::make("AddOutputInfo", { QuantizationInfo(0.24, -39) });
+
+// Representable Range: [-4.8, 10.5], Expected FP32 range: [-9.6713, 14.0942], leaving some space for saturation
+// This range also makes sense with the activation boundaries above, i.e. [-2, 8] for LU_BOUNDED_RELU and [0, 6] for BOUNDED_RELU
+const auto qasymm8_signed_final_output_qinfo_set = framework::dataset::make("FinalOutputInfo", { QuantizationInfo(0.06, -48) });
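+
+// The same check for QASYMM8_SIGNED, where q lies in [-128, 127]: QuantizationInfo(0.1, 10) represents
+// [(-128 - 10) * 0.1, (127 - 10) * 0.1] = [-13.8, 11.7] and the final output QuantizationInfo(0.06, -48)
+// represents [(-128 + 48) * 0.06, (127 + 48) * 0.06] = [-4.8, 10.5], matching the ranges quoted above.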
+
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(AddMulAdd)
+
+template <typename T>
+using NEAddMulAddFloatFixture = AddMulAddFloatValidationFixture<Tensor, Accessor, NEAddMulAdd, T, true>;
+
+template <typename T>
+using NEAddMulAddFloatFixtureWoIntermOut = AddMulAddFloatValidationFixture<Tensor, Accessor, NEAddMulAdd, T, false>;
+
+TEST_SUITE(Float)
+
+TEST_SUITE(F32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulAddFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+
+// This test stresses the case where no intermediate output is required (i.e. a nullptr is passed for it)
+FIXTURE_DATA_TEST_CASE(RunSmallWithoutIntermOutput, NEAddMulAddFloatFixtureWoIntermOut<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })))
+{
+ // Validate outputs
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulAddFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
+
+TEST_SUITE_END() // F32
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(F16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulAddFloatFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulAddFloatFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::F16)),
+ ActivationFunctionsDataset))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
+ validate(Accessor(_target), _reference, tolerance_fp16);
+}
+TEST_SUITE_END() // F16
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+TEST_SUITE_END() // Float
+
+template <typename T>
+using NEAddMulQuantizedFixture = AddMulAddQuantizedValidationFixture<Tensor, Accessor, NEAddMulAdd, T, true>;
+
+template <typename T>
+using NEAddMulAddQuantizedFixtureWoIntermOut = AddMulAddQuantizedValidationFixture<Tensor, Accessor, NEAddMulAdd, T, false>;
+
+TEST_SUITE(Quantized)
+
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ ActivationFunctionsDataset),
+ qasymm8_input1_qinfo_set),
+ qasymm8_input2_qinfo_set),
+ qasymm8_bn_mul_qinfo_set),
+ qasymm8_bn_add_qinfo_set),
+ qasymm8_add_output_qinfo_set),
+ qasymm8_final_output_qinfo_set))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
+ validate(Accessor(_target), _reference, tolerance_quant);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ ActivationFunctionsDataset),
+ qasymm8_input1_qinfo_set),
+ qasymm8_input2_qinfo_set),
+ qasymm8_bn_mul_qinfo_set),
+ qasymm8_bn_add_qinfo_set),
+ qasymm8_add_output_qinfo_set),
+ qasymm8_final_output_qinfo_set))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
+ validate(Accessor(_target), _reference, tolerance_quant);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ ActivationFunctionsDataset),
+ qasymm8_signed_input1_qinfo_set),
+ qasymm8_signed_input2_qinfo_set),
+ qasymm8_signed_bn_mul_qinfo_set),
+ qasymm8_signed_bn_add_qinfo_set),
+ qasymm8_signed_add_output_qinfo_set),
+ qasymm8_signed_final_output_qinfo_set))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
+ validate(Accessor(_target), _reference, tolerance_quant);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(datasets::LargeShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ ActivationFunctionsDataset),
+ qasymm8_signed_input1_qinfo_set),
+ qasymm8_signed_input2_qinfo_set),
+ qasymm8_signed_bn_mul_qinfo_set),
+ qasymm8_signed_bn_add_qinfo_set),
+ qasymm8_signed_add_output_qinfo_set),
+ qasymm8_signed_final_output_qinfo_set))
+{
+ // Validate outputs
+ validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
+ validate(Accessor(_target), _reference, tolerance_quant);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
+
+TEST_SUITE_END() // AddMulAdd
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/AddMulAddFixture.h b/tests/validation/fixtures/AddMulAddFixture.h
new file mode 100644
index 0000000000..fac2bfe528
--- /dev/null
+++ b/tests/validation/fixtures/AddMulAddFixture.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE
+#define TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ActivationLayer.h"
+#include "tests/validation/reference/ArithmeticOperations.h"
+#include "tests/validation/reference/DequantizationLayer.h"
+#include "tests/validation/reference/PixelWiseMultiplication.h"
+#include "tests/validation/reference/QuantizationLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AddMulAddGenericFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out)
+ {
+ compute_target(shape, data_type, act_info, interm_out);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, DataType data_type)
+ {
+ switch(data_type)
+ {
+ case DataType::F32:
+ library->fill_tensor_uniform(tensor, i, -10.f, 10.f);
+ break;
+ case DataType::F16:
+ library->fill_tensor_uniform(tensor, i, -1.f, 1.f);
+ break;
+ default:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+ }
+ }
+
+ void compute_target(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info, bool interm_out)
+ {
+ TensorShape b_shape(shape.x());
+
+ // Create tensors
+ TensorType input1 = create_tensor<TensorType>(shape, data_type, 1, _input1_qinfo);
+ TensorType input2 = create_tensor<TensorType>(shape, data_type, 1, _input2_qinfo);
+ TensorType bn_mul = create_tensor<TensorType>(b_shape, data_type, 1, _bn_mul_qinfo);
+ TensorType bn_add = create_tensor<TensorType>(b_shape, data_type, 1, _bn_add_qinfo);
+ TensorType add_output = create_tensor<TensorType>(shape, data_type, 1, _add_output_qinfo);
+ TensorType final_output = create_tensor<TensorType>(shape, data_type, 1, _final_output_qinfo);
+
+ // Create and configure function
+ FunctionType add_mul_add;
+ add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add, interm_out ? &add_output : nullptr, &final_output, ConvertPolicy::SATURATE, act_info);
+
+ // Allocate tensors
+ input1.allocator()->allocate();
+ input2.allocator()->allocate();
+ bn_mul.allocator()->allocate();
+ bn_add.allocator()->allocate();
+
+ if(interm_out)
+ {
+ add_output.allocator()->allocate();
+ }
+
+ final_output.allocator()->allocate();
+
+ // Fill tensors
+ fill(AccessorType(input1), 0, data_type);
+ fill(AccessorType(input2), 1, data_type);
+ fill(AccessorType(bn_mul), 2, data_type);
+ fill(AccessorType(bn_add), 3, data_type);
+
+ // Compute function
+ add_mul_add.run();
+
+ _target = std::move(final_output);
+
+ if(interm_out)
+ {
+ _interm_target = std::move(add_output);
+ }
+ }
+
+ TensorType _target{};
+ TensorType _interm_target{};
+ SimpleTensor<T> _reference{};
+ SimpleTensor<T> _interm_reference{};
+
+ QuantizationInfo _input1_qinfo{};
+ QuantizationInfo _input2_qinfo{};
+ QuantizationInfo _bn_mul_qinfo{};
+ QuantizationInfo _bn_add_qinfo{};
+ QuantizationInfo _add_output_qinfo{};
+ QuantizationInfo _final_output_qinfo{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out>
+class AddMulAddFloatValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>;
+
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info)
+ {
+ Parent::setup(shape, data_type, act_info, interm_out);
+ compute_reference(shape, data_type, act_info);
+ }
+
+ // compute_reference() is kept outside the generic fixture because, for the quantized data types,
+ // it becomes a very different implementation in which the intermediate tensors are always float.
+ // Keeping the variants separate makes the reference calculations more readable and the classes
+ // smaller, since the shared fill() and compute_target() methods are not repeated.
+ void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info)
+ {
+ TensorShape b_shape(shape.x());
+
+ // Create reference
+ SimpleTensor<T> input1{ shape, data_type };
+ SimpleTensor<T> input2{ shape, data_type };
+ SimpleTensor<T> bn_mul{ b_shape, data_type };
+ SimpleTensor<T> bn_add{ b_shape, data_type };
+ SimpleTensor<T> add_output{ shape, data_type, 1 };
+
+ SimpleTensor<T> bn_mul_out{ shape, data_type };
+ SimpleTensor<T> bn_add_out{ shape, data_type };
+
+ // Fill reference
+ Parent::fill(input1, 0, data_type);
+ Parent::fill(input2, 1, data_type);
+ Parent::fill(bn_mul, 2, data_type);
+ Parent::fill(bn_add, 3, data_type);
+
+ reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, input1, input2, add_output, ConvertPolicy::SATURATE);
+ bn_mul_out = reference::pixel_wise_multiplication<T, T, T>(add_output, bn_mul, 1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, data_type);
+ reference::arithmetic_operation<T>(reference::ArithmeticOperation::ADD, bn_mul_out, bn_add, bn_add_out, ConvertPolicy::SATURATE);
+
+ if(interm_out)
+ {
+ Parent::_interm_reference = std::move(add_output);
+ }
+
+ if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY)
+ {
+ Parent::_reference = reference::activation_layer(bn_add_out, act_info);
+ }
+ else
+ {
+ Parent::_reference = std::move(bn_add_out);
+ }
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool interm_out>
+class AddMulAddQuantizedValidationFixture : public AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ using Parent = AddMulAddGenericFixture<TensorType, AccessorType, FunctionType, T>;
+
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, ActivationLayerInfo act_info,
+ QuantizationInfo input1_qinfo, QuantizationInfo input2_qinfo, QuantizationInfo bn_mul_qinfo,
+ QuantizationInfo bn_add_qinfo, QuantizationInfo add_output_qinfo, QuantizationInfo final_output_qinfo)
+ {
+ // Quantization arguments are moved to class attributes to avoid long function declarations
+ Parent::_input1_qinfo = input1_qinfo;
+ Parent::_input2_qinfo = input2_qinfo;
+ Parent::_bn_mul_qinfo = bn_mul_qinfo;
+ Parent::_bn_add_qinfo = bn_add_qinfo;
+ Parent::_add_output_qinfo = add_output_qinfo;
+ Parent::_final_output_qinfo = final_output_qinfo;
+
+ Parent::setup(shape, data_type, act_info, interm_out);
+ compute_reference(shape, data_type, act_info);
+ }
+
+ // compute_reference() is kept outside the generic fixture because, for the quantized data types,
+ // it becomes a very different implementation in which the intermediate tensors are always float.
+ // Keeping the variants separate makes the reference calculations more readable and the classes
+ // smaller, since the shared fill() and compute_target() methods are not repeated.
+ void compute_reference(const TensorShape &shape, DataType data_type, ActivationLayerInfo &act_info)
+ {
+ TensorShape b_shape(shape.x());
+
+ // Create reference
+ SimpleTensor<T> input1{ shape, data_type, 1, Parent::_input1_qinfo };
+ SimpleTensor<T> input2{ shape, data_type, 1, Parent::_input2_qinfo };
+ SimpleTensor<T> bn_mul{ b_shape, data_type, 1, Parent::_bn_mul_qinfo };
+ SimpleTensor<T> bn_add{ b_shape, data_type, 1, Parent::_bn_add_qinfo };
+
+ // Fill input tensors
+ Parent::fill(input1, 0, data_type);
+ Parent::fill(input2, 1, data_type);
+ Parent::fill(bn_mul, 2, data_type);
+ Parent::fill(bn_add, 3, data_type);
+
+ SimpleTensor<float> input1_dequantized = reference::dequantization_layer<float>(input1);
+ SimpleTensor<float> input2_dequantized = reference::dequantization_layer<float>(input2);
+ SimpleTensor<float> bn_mul_dequantized = reference::dequantization_layer<float>(bn_mul);
+ SimpleTensor<float> bn_add_dequantized = reference::dequantization_layer<float>(bn_add);
+
+ SimpleTensor<float> add_output_dequantized{ shape, DataType::F32 };
+ SimpleTensor<float> bn_add_out_dequantized{ shape, DataType::F32 };
+
+ reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, input1_dequantized, input2_dequantized, add_output_dequantized, ConvertPolicy::SATURATE);
+ SimpleTensor<float> bn_mul_out_dequantized = reference::pixel_wise_multiplication<float, float, float>(add_output_dequantized, bn_mul_dequantized, 1.f, ConvertPolicy::SATURATE,
+ RoundingPolicy::TO_NEAREST_UP, DataType::F32);
+ reference::arithmetic_operation<float>(reference::ArithmeticOperation::ADD, bn_mul_out_dequantized, bn_add_dequantized, bn_add_out_dequantized, ConvertPolicy::SATURATE);
+
+ if(interm_out)
+ {
+ Parent::_interm_reference = reference::quantization_layer<float, T>(add_output_dequantized, data_type, Parent::_add_output_qinfo);
+ }
+
+ if(act_info.enabled() && act_info.activation() != ActivationLayerInfo::ActivationFunction::IDENTITY)
+ {
+ SimpleTensor<T> ref = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo);
+ Parent::_reference = reference::activation_layer(ref, act_info);
+ }
+ else
+ {
+ Parent::_reference = reference::quantization_layer<float, T>(bn_add_out_dequantized, data_type, Parent::_final_output_qinfo);
+ }
+ }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif /* TESTS_VALIDATION_FIXTURES_ADDMULADDFIXTURE */