author     Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>   2023-09-27 17:46:17 +0100
committer  felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>   2023-09-28 12:08:05 +0000
commit     afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree       03bc7d5a762099989b16a656fa8d397b490ed70e /src/cpu/kernels/addmuladd/generic/neon/fp32.cpp
parent     bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download   ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration
file (not part of this delivery). clang-format version 14.0.6 is used.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow-up for formatting of the .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
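
The revised .clang-format file is, per the message above, not part of this delivery. For orientation only, the formatting visible in the diff below (a space after control-flow keywords, one parameter per line in long signatures, continuation lines aligned under the opening parenthesis, wrapping near 120 columns) is consistent with options along the following lines; every entry here is an inference from the diff, not the delivered configuration:

# Hypothetical .clang-format sketch inferred from the diff below.
# NOT the project's actual configuration, which is not part of this delivery.
Language:              Cpp
ColumnLimit:           120               # clobber/operand lines below wrap near 120 columns
SpaceBeforeParens:     ControlStatements # yields "if (" / "else if (" as seen in the hunks
BinPackParameters:     false             # one parameter per line in long function signatures
BinPackArguments:      true              # call arguments stay packed up to the column limit
AlignAfterOpenBracket: Align             # continuations align under the opening parenthesis

With clang-format 14.0.6 installed, running `clang-format -i` over the non-excluded sources with such a file would reproduce this kind of purely mechanical reflow.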
Diffstat (limited to 'src/cpu/kernels/addmuladd/generic/neon/fp32.cpp')
-rw-r--r--   src/cpu/kernels/addmuladd/generic/neon/fp32.cpp   104
1 file changed, 56 insertions(+), 48 deletions(-)
diff --git a/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp b/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp
index b0c487ec56..f0444b6acd 100644
--- a/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp
+++ b/src/cpu/kernels/addmuladd/generic/neon/fp32.cpp
@@ -35,16 +35,20 @@
 #ifdef __aarch64__
 namespace
 {
-void a64_add_bn_clamp_direct_fp32_2x16(
-    float *out, size_t out_stride,
-    float *out_direct, size_t out_direct_stride,
-    const float *in0, size_t in0_stride,
-    const float *in1, size_t in1_stride,
-    const float *bn_mul,
-    const float *bn_add,
-    const float minval,
-    const float maxval,
-    size_t width, size_t height)
+void a64_add_bn_clamp_direct_fp32_2x16(float       *out,
+                                       size_t       out_stride,
+                                       float       *out_direct,
+                                       size_t       out_direct_stride,
+                                       const float *in0,
+                                       size_t       in0_stride,
+                                       const float *in1,
+                                       size_t       in1_stride,
+                                       const float *bn_mul,
+                                       const float *bn_add,
+                                       const float  minval,
+                                       const float  maxval,
+                                       size_t       width,
+                                       size_t       height)
 {
     struct KernelArgs
     {
@@ -631,18 +635,30 @@ void a64_add_bn_clamp_direct_fp32_2x16(
       "subs x20, x20, #0x2\n"
       "bgt 8b\n"
       "34:" // odd columns skip
-      : [bn_add] "+&r"(bn_add), [bn_mul] "+&r"(bn_mul), [in0] "+&r"(in0), [in1] "+&r"(in1), [out] "+&r"(out), [out_direct] "+&r"(out_direct), [width] "+&r"(width)
-      : [args_ptr] "r"(&ka), [height] "r"(height), [in0_stride] "r"(in0_stride), [in1_stride] "r"(in1_stride), [offsetof_maxval] "I"(offsetof(KernelArgs, maxval)), [offsetof_minval] "I"(offsetof(KernelArgs, minval)), [out_direct_stride] "r"(out_direct_stride), [out_stride] "r"(out_stride)
-      : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
-}
+      : [bn_add] "+&r"(bn_add), [bn_mul] "+&r"(bn_mul), [in0] "+&r"(in0), [in1] "+&r"(in1), [out] "+&r"(out),
+        [out_direct] "+&r"(out_direct), [width] "+&r"(width)
+      : [args_ptr] "r"(&ka), [height] "r"(height), [in0_stride] "r"(in0_stride), [in1_stride] "r"(in1_stride),
+        [offsetof_maxval] "I"(offsetof(KernelArgs, maxval)), [offsetof_minval] "I"(offsetof(KernelArgs, minval)),
+        [out_direct_stride] "r"(out_direct_stride), [out_stride] "r"(out_stride)
+      : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16",
+        "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9",
+        "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28");
 }
+} // namespace
 
 namespace arm_compute
 {
 namespace cpu
 {
-void add_mul_add_fp32_neon(const ITensor *input1, const ITensor *input2, const ITensor *bn_mul, const ITensor *bn_add,
-                           ITensor *add_output, ITensor *final_output, ConvertPolicy policy, const ActivationLayerInfo &act_info, const Window &window)
+void add_mul_add_fp32_neon(const ITensor             *input1,
+                           const ITensor             *input2,
+                           const ITensor             *bn_mul,
+                           const ITensor             *bn_add,
+                           ITensor                   *add_output,
+                           ITensor                   *final_output,
+                           ConvertPolicy              policy,
+                           const ActivationLayerInfo &act_info,
+                           const Window              &window)
 {
     ARM_COMPUTE_UNUSED(policy);
@@ -654,16 +670,16 @@ void add_mul_add_fp32_neon(const ITensor *input1, const ITensor *input2, const I
     float minval = std::numeric_limits<float>::lowest();
     float maxval = std::numeric_limits<float>::max();
 
-    if(act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
+    if (act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU)
     {
         minval = 0.f;
     }
-    else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
+    else if (act_info.activation() == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
     {
         minval = 0.f;
         maxval = act_info.a();
     }
-    else if(act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
+    else if (act_info.activation() == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
     {
         minval = act_info.b();
         maxval = act_info.a();
@@ -681,42 +697,34 @@ void add_mul_add_fp32_neon(const ITensor *input1, const ITensor *input2, const I
     const size_t width  = window.num_iterations(0);
     const size_t height = window.num_iterations(1);
 
-    if(add_output != nullptr)
+    if (add_output != nullptr)
     {
         Iterator add_out_it(add_output, window);
         execute_window_loop(
-            win, [&](const Coordinates &)
-            {
-                a64_add_bn_clamp_direct_fp32_2x16(
-                    reinterpret_cast<float *>(out_it.ptr()), out_stride,
-                    reinterpret_cast<float *>(add_out_it.ptr()), out_direct_stride,
-                    reinterpret_cast<float *>(in1_it.ptr()), in0_stride,
-                    reinterpret_cast<float *>(in2_it.ptr()), in1_stride,
-                    reinterpret_cast<float *>(bn_mul->buffer()),
-                    reinterpret_cast<float *>(bn_add->buffer()),
-                    minval,
-                    maxval,
-                    width, height);
-            },
-            in1_it, in2_it, add_out_it, out_it);
+            win,
+            [&](const Coordinates &)
+            {
+                a64_add_bn_clamp_direct_fp32_2x16(
+                    reinterpret_cast<float *>(out_it.ptr()), out_stride, reinterpret_cast<float *>(add_out_it.ptr()),
+                    out_direct_stride, reinterpret_cast<float *>(in1_it.ptr()), in0_stride,
+                    reinterpret_cast<float *>(in2_it.ptr()), in1_stride, reinterpret_cast<float *>(bn_mul->buffer()),
+                    reinterpret_cast<float *>(bn_add->buffer()), minval, maxval, width, height);
+            },
+            in1_it, in2_it, add_out_it, out_it);
     }
     else
     {
         execute_window_loop(
-            win, [&](const Coordinates &)
-            {
-                a64_add_bn_clamp_direct_fp32_2x16(
-                    reinterpret_cast<float *>(out_it.ptr()), out_stride,
-                    nullptr, out_direct_stride,
-                    reinterpret_cast<float *>(in1_it.ptr()), in0_stride,
-                    reinterpret_cast<float *>(in2_it.ptr()), in1_stride,
-                    reinterpret_cast<float *>(bn_mul->buffer()),
-                    reinterpret_cast<float *>(bn_add->buffer()),
-                    minval,
-                    maxval,
-                    width, height);
-            },
-            in1_it, in2_it, out_it);
+            win,
+            [&](const Coordinates &)
+            {
+                a64_add_bn_clamp_direct_fp32_2x16(
+                    reinterpret_cast<float *>(out_it.ptr()), out_stride, nullptr, out_direct_stride,
+                    reinterpret_cast<float *>(in1_it.ptr()), in0_stride, reinterpret_cast<float *>(in2_it.ptr()),
+                    in1_stride, reinterpret_cast<float *>(bn_mul->buffer()),
+                    reinterpret_cast<float *>(bn_add->buffer()), minval, maxval, width, height);
+            },
+            in1_it, in2_it, out_it);
     }
 }
 } // namespace cpu
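
For orientation, the kernel touched by this reflow fuses an element-wise addition with a folded batch-norm multiply-add and a clamp. The scalar sketch below is inferred from the parameter names and the minval/maxval handling in the diff; it is not ComputeLibrary code (the real a64_add_bn_clamp_direct_fp32_2x16 is hand-written AArch64 assembly), and stride handling is simplified to element strides:

#include <algorithm>
#include <cstddef>

// Hypothetical scalar reference for what the assembly kernel computes,
// inferred from its signature; byte strides and the 2x16 tiling are omitted.
void add_bn_clamp_reference(float *out, std::size_t out_stride,
                            float *out_direct, std::size_t out_direct_stride,
                            const float *in0, std::size_t in0_stride,
                            const float *in1, std::size_t in1_stride,
                            const float *bn_mul, const float *bn_add,
                            float minval, float maxval,
                            std::size_t width, std::size_t height)
{
    for (std::size_t y = 0; y < height; ++y)
    {
        for (std::size_t x = 0; x < width; ++x)
        {
            const float sum = in0[y * in0_stride + x] + in1[y * in1_stride + x];
            if (out_direct != nullptr) // optional raw sum, i.e. the add_output tensor
                out_direct[y * out_direct_stride + x] = sum;
            // Per-column batch-norm fold, clamped to the activation bounds
            // (RELU / BOUNDED_RELU / LU_BOUNDED_RELU) computed by the caller.
            out[y * out_stride + x] = std::min(std::max(sum * bn_mul[x] + bn_add[x], minval), maxval);
        }
    }
}

The minval/maxval pair here is exactly what add_mul_add_fp32_neon derives from act_info before entering the window loop.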