author     Dana Zlotnik <dana.zlotnik@arm.com>    2022-02-21 13:12:41 +0200
committer  Dana Zlotnik <dana.zlotnik@arm.com>    2022-03-01 11:19:23 +0000
commit     a538ae583c8816f69d05b98c62a9d3092f88f798 (patch)
tree       33ffb4611e9d660223b50d805ac60babbaba4ebb
parent     ee9050089e391e598cd58e05bc7a07597a6d1db0 (diff)
download   ComputeLibrary-a538ae583c8816f69d05b98c62a9d3092f88f798.tar.gz
Multi ISA Technical Debt
* Update json struct to match multi-ISA updates
* Add impl.cpp in kernels where we only have impl.h

Resolves COMPMID-5173

Change-Id: I5da3c4b016a5d0115c4ba46cbfefde7bce518ac1
Signed-off-by: Dana Zlotnik <dana.zlotnik@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7191
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  Android.bp                                                 2
-rw-r--r--  filelist.json                                             21
-rw-r--r--  src/core/NEON/kernels/NECropKernel.cpp                     4
-rw-r--r--  src/cpu/kernels/CpuActivationKernel.cpp                   32
-rw-r--r--  src/cpu/kernels/CpuAddKernel.cpp                           2
-rw-r--r--  src/cpu/kernels/CpuPool2dKernel.cpp                        6
-rw-r--r--  src/cpu/kernels/CpuScaleKernel.cpp                         8
-rw-r--r--  src/cpu/kernels/CpuSubKernel.cpp                           2
-rw-r--r--  src/cpu/kernels/crop/generic/neon/integer.cpp              4
-rw-r--r--  src/cpu/kernels/crop/list.h (renamed from src/cpu/kernels/crop/generic/neon/list.h)  2
-rw-r--r--  src/cpu/kernels/elementwise_binary/generic/neon/impl.h     1
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp  115
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/neon/impl.h     79
-rw-r--r--  src/cpu/kernels/softmax/generic/neon/impl.cpp            399
-rw-r--r--  src/cpu/kernels/softmax/generic/neon/impl.h              349
-rw-r--r--  src/cpu/kernels/softmax/generic/sve2/impl.cpp            211
-rw-r--r--  src/cpu/kernels/softmax/generic/sve2/impl.h              176
17 files changed, 771 insertions, 642 deletions
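
The patch does two things. In filelist.json, quantized kernel sources move out of each operator's "common" list into per-data-type keys ("qasymm8", "qasymm8_signed", "qsymm16") under the relevant ISA, so a build filtered by data type or ISA compiles only what it needs. In the kernels, template bodies that previously lived in impl.h move into a new impl.cpp ending in explicit instantiations, leaving only declarations in the header. A minimal sketch of that split, using a hypothetical kernel_op template (the real kernels in the diff below follow the same shape):

// ---- impl.h: declaration only, so including it no longer drags the
// ---- vectorized body (and the intrinsics it needs) into every caller.
#include "arm_compute/core/Helpers.h"

namespace arm_compute
{
namespace cpu
{
template <typename ScalarType>
void kernel_op(const ITensor *in, ITensor *out, const Window &window);
} // namespace cpu
} // namespace arm_compute

// ---- impl.cpp: the definition plus one explicit instantiation per type the
// ---- dispatcher can select, so each instantiation is compiled exactly once.
namespace arm_compute
{
namespace cpu
{
template <typename ScalarType>
void kernel_op(const ITensor *in, ITensor *out, const Window &window)
{
    // vectorized window loop, as in elementwise_op in the diff below
    (void)in; (void)out; (void)window;
}

template void kernel_op<float>(const ITensor *in, ITensor *out, const Window &window);
template void kernel_op<int32_t>(const ITensor *in, ITensor *out, const Window &window);
} // namespace cpu
} // namespace arm_compute
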
diff --git a/Android.bp b/Android.bp
index 3a49b8c362..a279fdf5bb 100644
--- a/Android.bp
+++ b/Android.bp
@@ -458,6 +458,7 @@ cc_library_static {
"src/cpu/kernels/elementwise_binary/generic/neon/qasymm8_signed.cpp",
"src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp",
"src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp",
+ "src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp",
"src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp",
"src/cpu/kernels/floor/neon/fp16.cpp",
"src/cpu/kernels/floor/neon/fp32.cpp",
@@ -511,6 +512,7 @@ cc_library_static {
"src/cpu/kernels/select/generic/neon/integer.cpp",
"src/cpu/kernels/softmax/generic/neon/fp16.cpp",
"src/cpu/kernels/softmax/generic/neon/fp32.cpp",
+ "src/cpu/kernels/softmax/generic/neon/impl.cpp",
"src/cpu/kernels/softmax/generic/neon/qasymm8.cpp",
"src/cpu/kernels/softmax/generic/neon/qasymm8_signed.cpp",
"src/cpu/kernels/sub/neon/qasymm8.cpp",
diff --git a/filelist.json b/filelist.json
index 7e47df959c..3bdc00aeef 100644
--- a/filelist.json
+++ b/filelist.json
@@ -837,14 +837,14 @@
"common": [
"src/cpu/operators/CpuActivation.cpp",
"src/cpu/kernels/CpuActivationKernel.cpp",
- "src/runtime/NEON/functions/NEActivationLayer.cpp",
- "src/cpu/kernels/activation/generic/neon/qasymm8.cpp",
- "src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp",
- "src/cpu/kernels/activation/generic/neon/qsymm16.cpp"
+ "src/runtime/NEON/functions/NEActivationLayer.cpp"
],
"neon": {
"fp16": [ "src/cpu/kernels/activation/generic/neon/fp16.cpp" ],
- "fp32": [ "src/cpu/kernels/activation/generic/neon/fp32.cpp" ]
+ "fp32": [ "src/cpu/kernels/activation/generic/neon/fp32.cpp" ],
+ "qasymm8": [ "src/cpu/kernels/activation/generic/neon/qasymm8.cpp" ],
+ "qasymm8_signed": [ "src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp" ],
+ "qsymm16": [ "src/cpu/kernels/activation/generic/neon/qsymm16.cpp" ]
},
"sve": {
"fp16": [ "src/cpu/kernels/activation/generic/sve/fp16.cpp" ],
@@ -961,7 +961,6 @@
"fp32":["src/cpu/kernels/boundingboxtransform/generic/neon/fp32.cpp"],
"fp16":["src/cpu/kernels/boundingboxtransform/generic/neon/fp16.cpp"],
"qsymm16":["src/cpu/kernels/boundingboxtransform/generic/neon/qsymm16.cpp"]
-
}
}
},
@@ -1304,6 +1303,7 @@
"src/runtime/NEON/functions/NEElementwiseUnaryLayer.cpp"
],
"neon": {
+ "common":["src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp"],
"integer": ["src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp"],
"fp32": ["src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp"],
"fp16": ["src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp"]
@@ -1709,8 +1709,6 @@
"src/cpu/operators/CpuPool2d.cpp",
"src/cpu/kernels/CpuPool2dKernel.cpp",
"src/cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp",
- "src/cpu/kernels/pool2d/neon/qasymm8.cpp",
- "src/cpu/kernels/pool2d/neon/qasymm8_signed.cpp",
"src/runtime/NEON/functions/NEPoolingLayer.cpp"
],
"neon": {
@@ -1743,7 +1741,9 @@
],
"nchw": [ "src/cpu/kernels/pool2d/neon/nchw/all.cpp" ],
"fp16": [ "src/cpu/kernels/pool2d/neon/fp16.cpp" ],
- "fp32": [ "src/cpu/kernels/pool2d/neon/fp32.cpp" ]
+ "fp32": [ "src/cpu/kernels/pool2d/neon/fp32.cpp" ],
+ "qasymm8":[ "src/cpu/kernels/pool2d/neon/qasymm8.cpp" ],
+ "qasymm8_signed":["src/cpu/kernels/pool2d/neon/qasymm8_signed.cpp"]
},
"sve": {
"common": [
@@ -1884,7 +1884,6 @@
"integer": [ "src/cpu/kernels/scale/sve/integer.cpp" ],
"qasymm8": [ "src/cpu/kernels/scale/sve/qasymm8.cpp" ],
"qasymm8_signed": [ "src/cpu/kernels/scale/sve/qasymm8_signed.cpp" ]
-
},
"neon": {
"fp16": [ "src/cpu/kernels/scale/neon/fp16.cpp" ],
@@ -1925,6 +1924,7 @@
"src/runtime/NEON/functions/NESoftmaxLayer.cpp"
],
"neon":{
+ "common":["src/cpu/kernels/softmax/generic/neon/impl.cpp"],
"fp32": ["src/cpu/kernels/softmax/generic/neon/fp32.cpp"],
"fp16": ["src/cpu/kernels/softmax/generic/neon/fp16.cpp"],
"qasymm8":[ "src/cpu/kernels/softmax/generic/neon/qasymm8.cpp"],
@@ -1938,6 +1938,7 @@
"qasymm8_signed": ["src/cpu/kernels/softmax/generic/sve/qasymm8_signed.cpp"]
},
"sve2":{
+ "common" :["src/cpu/kernels/softmax/generic/sve2/impl.cpp"],
"qasymm8":[ "src/cpu/kernels/softmax/generic/sve2/qasymm8.cpp"],
"qasymm8_signed":["src/cpu/kernels/softmax/generic/sve2/qasymm8_signed.cpp"]
}
diff --git a/src/core/NEON/kernels/NECropKernel.cpp b/src/core/NEON/kernels/NECropKernel.cpp
index 729402116d..94c455305c 100644
--- a/src/core/NEON/kernels/NECropKernel.cpp
+++ b/src/core/NEON/kernels/NECropKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,7 @@
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/core/utils/helpers/bit_ops.h"
-#include "src/cpu/kernels/crop/generic/neon/list.h"
+#include "src/cpu/kernels/crop/list.h"
namespace arm_compute
{
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index cfe732ab5f..d5112b4ba9 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -46,8 +46,23 @@ namespace
static const std::vector<CpuActivationKernel::ActivationKernel> available_kernels =
{
{
+ "sve2_qu8_activation",
+ [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve2; },
+ REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_activation)
+ },
+ {
+ "sve2_qs8_activation",
+ [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve2; },
+ REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_activation)
+ },
+ {
+ "sve2_qs16_activation",
+ [](const DataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16 && data.isa.sve2; },
+ REGISTER_QSYMM16_SVE2(arm_compute::cpu::sve2_qsymm16_activation)
+ },
+ {
"sve_fp16_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve; },
+ [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16; },
REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_activation)
},
{
@@ -66,21 +81,6 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel
REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_activation)
},
{
- "sve2_qu8_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve2; },
- REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_activation)
- },
- {
- "sve2_qs8_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve2; },
- REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_activation)
- },
- {
- "sve2_qs16_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16 && data.isa.sve2; },
- REGISTER_QSYMM16_SVE2(arm_compute::cpu::sve2_qsymm16_activation)
- },
- {
"neon_qu8_activation",
[](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; },
REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation)
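
This hunk moves the three SVE2 entries above the NEON ones and tightens "sve_fp16_activation" to also require data.isa.fp16. Ordering matters because selection is first-match: "neon_qu8_activation" accepts any QASYMM8 query regardless of ISA, so it would shadow "sve2_qu8_activation" if listed first. A simplified sketch of the first-match walk, assuming get_implementation() returns the first entry whose predicate accepts the query; the types here are cut-down stand-ins, not the library's:

#include <functional>
#include <string>
#include <vector>

enum class DataType { QASYMM8, QASYMM8_SIGNED, QSYMM16, F16, F32 };
struct CpuIsaInfo { bool sve; bool sve2; bool fp16; };
struct DataTypeISASelectorData { DataType dt; CpuIsaInfo isa; };

struct ActivationKernel
{
    std::string name;
    std::function<bool(const DataTypeISASelectorData &)> is_selected;
};

// First entry whose predicate accepts the query wins, so order encodes priority.
const ActivationKernel *get_implementation(const std::vector<ActivationKernel> &table,
                                           const DataTypeISASelectorData &data)
{
    for(const auto &uk : table)
    {
        if(uk.is_selected(data))
        {
            return &uk;
        }
    }
    return nullptr;
}

int main()
{
    const std::vector<ActivationKernel> table =
    {
        // SVE2 entry listed first, as in the patched file...
        { "sve2_qu8_activation", [](const DataTypeISASelectorData & d) { return d.dt == DataType::QASYMM8 && d.isa.sve2; } },
        // ...so the ISA-agnostic NEON fallback no longer shadows it.
        { "neon_qu8_activation", [](const DataTypeISASelectorData & d) { return d.dt == DataType::QASYMM8; } },
    };
    const auto *k = get_implementation(table, { DataType::QASYMM8, { true, true, true } });
    return (k != nullptr && k->name == "sve2_qu8_activation") ? 0 : 1;
}
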
diff --git a/src/cpu/kernels/CpuAddKernel.cpp b/src/cpu/kernels/CpuAddKernel.cpp
index d06621fae0..e756effea9 100644
--- a/src/cpu/kernels/CpuAddKernel.cpp
+++ b/src/cpu/kernels/CpuAddKernel.cpp
@@ -79,7 +79,7 @@ static const std::vector<CpuAddKernel::AddKernel> available_kernels =
"sve_fp16_add",
[](const DataTypeISASelectorData & data)
{
- return (data.dt == DataType::F16) && data.isa.sve;
+ return (data.dt == DataType::F16) && data.isa.sve && data.isa.fp16;
},
REGISTER_FP16_SVE(arm_compute::cpu::add_fp16_sve)
},
diff --git a/src/cpu/kernels/CpuPool2dKernel.cpp b/src/cpu/kernels/CpuPool2dKernel.cpp
index d0ca2d285d..8f04812b0c 100644
--- a/src/cpu/kernels/CpuPool2dKernel.cpp
+++ b/src/cpu/kernels/CpuPool2dKernel.cpp
@@ -64,13 +64,11 @@ static const std::vector<CpuPool2dKernel::PoolingKernel> available_kernels =
[](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::QASYMM8_SIGNED)); },
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::poolingMxN_qasymm8_signed_neon_nhwc)
},
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
{
"neon_f16_nhwc_poolMxN",
- [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::F16)); },
+ [](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::F16)) && data.isa.fp16; },
REGISTER_FP16_NEON(arm_compute::cpu::poolingMxN_fp16_neon_nhwc)
},
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
{
"neon_fp32_nhwc_poolMxN",
[](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NHWC) && (data.dt == DataType::F32)); },
@@ -107,7 +105,6 @@ static const std::vector<CpuPool2dKernel::PoolingKernel> available_kernels =
[](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::QASYMM8_SIGNED)); },
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::poolingMxN_quantized_neon_nchw<int8_t>)
},
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
{
"neon_fp16_nchw_pool2",
[](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F16 && data.isa.fp16) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 2)); },
@@ -123,7 +120,6 @@ static const std::vector<CpuPool2dKernel::PoolingKernel> available_kernels =
[](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F16 && data.isa.fp16)); },
REGISTER_FP16_NEON(arm_compute::cpu::poolingMxN_fp16_neon_nchw)
},
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
{
"neon_fp32_nchw_pool2",
[](const PoolDataTypeISASelectorData & data) { return ((data.dl == DataLayout::NCHW) && (data.dt == DataType::F32) && (data.pool_size.x() == data.pool_size.y()) && (data.pool_size.x() == 2)); },
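
Here, and in CpuScaleKernel.cpp and CpuSubKernel.cpp below, compile-time fences (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC, ARM_COMPUTE_ENABLE_SVE, ARM_COMPUTE_ENABLE_NEON) around table entries are dropped in favour of runtime predicates on data.isa, so a single multi-ISA binary carries every entry and picks at run time. A minimal sketch of the difference, with simplified stand-in types:

#include <cstdio>

// Before the patch, fp16 entries were wrapped in
//   #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) ... #endif
// and vanished from binaries built without that flag. After it, the entry is
// always compiled and its predicate consults a flag detected at startup.
struct CpuIsaInfo
{
    bool fp16; // filled in from runtime CPU feature detection
};

// Mirrors the predicate added in the diff:
// (data.dl == DataLayout::NHWC) && (data.dt == DataType::F16) && data.isa.fp16
bool f16_nhwc_pool_selected(const CpuIsaInfo &isa, bool is_nhwc, bool is_f16)
{
    return is_nhwc && is_f16 && isa.fp16;
}

int main()
{
    const CpuIsaInfo isa{ true };
    std::printf("neon_f16_nhwc_poolMxN eligible: %d\n",
                f16_nhwc_pool_selected(isa, true, true) ? 1 : 0);
}
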
diff --git a/src/cpu/kernels/CpuScaleKernel.cpp b/src/cpu/kernels/CpuScaleKernel.cpp
index 60564a97dd..e230dfa938 100644
--- a/src/cpu/kernels/CpuScaleKernel.cpp
+++ b/src/cpu/kernels/CpuScaleKernel.cpp
@@ -50,10 +50,9 @@ namespace
{
static const std::vector<CpuScaleKernel::ScaleKernel> available_kernels =
{
-#if defined(ARM_COMPUTE_ENABLE_SVE)
{
"sve_fp16_scale",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve; },
+ [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16; },
REGISTER_FP16_SVE(arm_compute::cpu::fp16_sve_scale)
},
{
@@ -81,15 +80,11 @@ static const std::vector<CpuScaleKernel::ScaleKernel> available_kernels =
[](const DataTypeISASelectorData & data) { return data.dt == DataType::S16 && data.isa.sve; },
REGISTER_INTEGER_SVE(arm_compute::cpu::s16_sve_scale)
},
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
-#if defined(ARM_COMPUTE_ENABLE_NEON)
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
{
"neon_fp16_scale",
[](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; },
REGISTER_FP16_NEON(arm_compute::cpu::common_neon_scale<float16_t>)
},
-#endif /* !defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
{
"neon_fp32_scale",
[](const DataTypeISASelectorData & data) { return data.dt == DataType::F32; },
@@ -115,7 +110,6 @@ static const std::vector<CpuScaleKernel::ScaleKernel> available_kernels =
[](const DataTypeISASelectorData & data) { return data.dt == DataType::S16; },
REGISTER_INTEGER_NEON(arm_compute::cpu::s16_neon_scale)
},
-#endif /* defined(ARM_COMPUTE_ENABLE_NEON) */
};
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dx, const ITensorInfo *dy,
diff --git a/src/cpu/kernels/CpuSubKernel.cpp b/src/cpu/kernels/CpuSubKernel.cpp
index c12feb4331..c55d11e899 100644
--- a/src/cpu/kernels/CpuSubKernel.cpp
+++ b/src/cpu/kernels/CpuSubKernel.cpp
@@ -46,13 +46,11 @@ static const std::vector<CpuSubKernel::SubKernel> available_kernels =
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
REGISTER_FP32_NEON(arm_compute::cpu::sub_same_neon<float>)
},
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
{
"neon_fp16_sub",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.fp16; },
REGISTER_FP16_NEON(arm_compute::cpu::sub_same_neon<float16_t>)
},
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
{
"neon_u8_sub",
[](const DataTypeISASelectorData & data) { return (data.dt == DataType::U8); },
diff --git a/src/cpu/kernels/crop/generic/neon/integer.cpp b/src/cpu/kernels/crop/generic/neon/integer.cpp
index 7dbf0a7f5e..ebf2c1fbd3 100644
--- a/src/cpu/kernels/crop/generic/neon/integer.cpp
+++ b/src/cpu/kernels/crop/generic/neon/integer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#include "src/cpu/kernels/crop/generic/neon/impl.h"
-#include "src/cpu/kernels/crop/generic/neon/list.h"
+#include "src/cpu/kernels/crop/list.h"
namespace arm_compute
{
diff --git a/src/cpu/kernels/crop/generic/neon/list.h b/src/cpu/kernels/crop/list.h
index f33049304d..a6b83215ae 100644
--- a/src/cpu/kernels/crop/generic/neon/list.h
+++ b/src/cpu/kernels/crop/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited.
+ * Copyright (c) 2021-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
diff --git a/src/cpu/kernels/elementwise_binary/generic/neon/impl.h b/src/cpu/kernels/elementwise_binary/generic/neon/impl.h
index ead54ab14e..98b154e8fd 100644
--- a/src/cpu/kernels/elementwise_binary/generic/neon/impl.h
+++ b/src/cpu/kernels/elementwise_binary/generic/neon/impl.h
@@ -69,6 +69,7 @@ typename VectorType::type elementwise_arithm_op(const typename VectorType::type
return res;
}
+
template <ArithmeticOperation op, typename ScalarType, typename VectorType>
typename VectorType::type elementwise_arithm_op_broadcast(const typename VectorType::type &a, const ScalarType &broadcast_value, const bool reorder)
{
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp
new file mode 100644
index 0000000000..30caa4ebeb
--- /dev/null
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2018-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/elementwise_unary/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+template <typename ScalarType>
+inline ScalarType elementwise_op_scalar_imp(ElementWiseUnary op, const ScalarType &a)
+{
+ switch(op)
+ {
+ case ElementWiseUnary::RSQRT:
+ return 1 / sqrt(a);
+ case ElementWiseUnary::EXP:
+ return std::exp(a);
+ case ElementWiseUnary::NEG:
+ return -a;
+ case ElementWiseUnary::LOG:
+ return std::log(a);
+ case ElementWiseUnary::ABS:
+ return std::abs(a);
+ case ElementWiseUnary::ROUND:
+ return support::cpp11::nearbyint(a);
+ case ElementWiseUnary::SIN:
+ return std::sin(a);
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+}
+
+template <typename ScalarType, typename VectorType>
+inline VectorType elementwise_op_imp(ElementWiseUnary op, const VectorType &a)
+{
+ switch(op)
+ {
+ case ElementWiseUnary::RSQRT:
+ return wrapper::vinvsqrt(a);
+ case ElementWiseUnary::EXP:
+ return wrapper::vexpq(a);
+ case ElementWiseUnary::NEG:
+ return wrapper::vneg(a);
+ case ElementWiseUnary::LOG:
+ return wrapper::vlog(a);
+ case ElementWiseUnary::ABS:
+ return wrapper::vabs(a);
+ case ElementWiseUnary::ROUND:
+ return wrapper::vround(a);
+ case ElementWiseUnary::SIN:
+ return wrapper::vsin(a);
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+}
+
+template <typename ScalarType>
+void elementwise_op(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+{
+ const int window_step_x = 16 / sizeof(ScalarType);
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(in, win);
+ Iterator output(out, win);
+
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
+ const auto input_ptr = reinterpret_cast<const ScalarType *>(input.ptr());
+
+ int x = window_start_x;
+ for(; x <= window_end_x - window_step_x; x += window_step_x)
+ {
+ wrapper::vstore(output_ptr + x, elementwise_op_imp<ScalarType>(op, wrapper::vloadq(input_ptr + x)));
+ }
+ for(; x < window_end_x; ++x)
+ {
+ *(output_ptr + x) = elementwise_op_scalar_imp(op, *(input_ptr + x));
+ }
+ },
+ input, output);
+}
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+template void elementwise_op<__fp16>(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op);
+#endif //defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+template void elementwise_op<float>(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op);
+template void elementwise_op<int32_t>(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op);
+
+} // namespace cpu
+} // namespace arm_compute
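
With the template body now owned by impl.cpp, the per-data-type files listed in filelist.json (fp32.cpp, fp16.cpp, integer.cpp) only need the declaration from impl.h and link against the explicit instantiations above. A sketch of what such a translation unit plausibly looks like; the real fp32.cpp is not part of this diff, and the wrapper name below is an assumption:

#include "src/cpu/kernels/elementwise_unary/generic/neon/impl.h"

namespace arm_compute
{
namespace cpu
{
// Thin non-template entry point that the kernel-selection table can register;
// it links to the "template void elementwise_op<float>(...)" instantiation.
void neon_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
{
    return elementwise_op<float>(in, out, window, op);
}
} // namespace cpu
} // namespace arm_compute
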
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/impl.h b/src/cpu/kernels/elementwise_unary/generic/neon/impl.h
index fd930ae7cf..66b8b5fe45 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/impl.h
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/impl.h
@@ -24,6 +24,7 @@
#ifndef SRC_CORE_NEON_KERNELS_ELEMENTWISE_UNARY_LIST_H
#define SRC_CORE_NEON_KERNELS_ELEMENTWISE_UNARY_LIST_H
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
@@ -32,83 +33,7 @@ namespace arm_compute
namespace cpu
{
template <typename ScalarType>
-inline ScalarType elementwise_op_scalar_imp(ElementWiseUnary op, const ScalarType &a)
-{
- switch(op)
- {
- case ElementWiseUnary::RSQRT:
- return 1 / sqrt(a);
- case ElementWiseUnary::EXP:
- return std::exp(a);
- case ElementWiseUnary::NEG:
- return -a;
- case ElementWiseUnary::LOG:
- return std::log(a);
- case ElementWiseUnary::ABS:
- return std::abs(a);
- case ElementWiseUnary::ROUND:
- return support::cpp11::nearbyint(a);
- case ElementWiseUnary::SIN:
- return std::sin(a);
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-}
-
-template <typename ScalarType, typename VectorType>
-inline VectorType elementwise_op_imp(ElementWiseUnary op, const VectorType &a)
-{
- switch(op)
- {
- case ElementWiseUnary::RSQRT:
- return wrapper::vinvsqrt(a);
- case ElementWiseUnary::EXP:
- return wrapper::vexpq(a);
- case ElementWiseUnary::NEG:
- return wrapper::vneg(a);
- case ElementWiseUnary::LOG:
- return wrapper::vlog(a);
- case ElementWiseUnary::ABS:
- return wrapper::vabs(a);
- case ElementWiseUnary::ROUND:
- return wrapper::vround(a);
- case ElementWiseUnary::SIN:
- return wrapper::vsin(a);
- default:
- ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
- }
-}
-
-template <typename ScalarType>
-void elementwise_op(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
-{
- const int window_step_x = 16 / sizeof(ScalarType);
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- Window win = window;
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input(in, win);
- Iterator output(out, win);
-
- execute_window_loop(win, [&](const Coordinates &)
- {
- auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());
- const auto input_ptr = reinterpret_cast<const ScalarType *>(input.ptr());
-
- int x = window_start_x;
- for(; x <= window_end_x - window_step_x; x += window_step_x)
- {
- wrapper::vstore(output_ptr + x, elementwise_op_imp<ScalarType>(op, wrapper::vloadq(input_ptr + x)));
- }
- for(; x < window_end_x; ++x)
- {
- *(output_ptr + x) = elementwise_op_scalar_imp(op, *(input_ptr + x));
- }
- },
- input, output);
-}
+void elementwise_op(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op);
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/neon/impl.cpp b/src/cpu/kernels/softmax/generic/neon/impl.cpp
new file mode 100644
index 0000000000..5654bb52ca
--- /dev/null
+++ b/src/cpu/kernels/softmax/generic/neon/impl.cpp
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/softmax/generic/neon/impl.h"
+#include "src/core/NEON/NEMath.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "support/SaturateCast.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+template <typename T>
+void neon_logits_1d_max(const ITensor *in, ITensor *out, const Window &window)
+{
+ /** SIMD vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+ constexpr int window_step_x = 16 / sizeof(T);
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win{ window };
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ Iterator input(in, win);
+ Iterator output(out, win);
+
+ const int sum_stages = log2(window_step_x / 2);
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ // Get pointers
+ const auto in_ptr = reinterpret_cast<const T *>(input.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(output.ptr());
+
+ // Init max value
+ auto vec_max = wrapper::vdup_n(support::cpp11::lowest<T>(), ExactTagType{});
+ int x = window_start_x;
+
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto current_value = wrapper::vloadq(in_ptr + x);
+ vec_max = wrapper::vmax(vec_max, current_value);
+ }
+ auto carry_max = wrapper::vpmax(wrapper::vgethigh(vec_max), wrapper::vgetlow(vec_max));
+
+ for(int i = 0; i < sum_stages; ++i)
+ {
+ carry_max = wrapper::vpmax(carry_max, carry_max);
+ }
+ T max_val = wrapper::vgetlane(carry_max, 0);
+
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ max_val = *(in_ptr + x) > max_val ? *(in_ptr + x) : max_val;
+ }
+
+ *out_ptr = max_val;
+ },
+ input, output);
+}
+
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+template void neon_logits_1d_max<float16_t>(const ITensor *in, ITensor *out, const Window &window);
+#endif //defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+template void neon_logits_1d_max<float>(const ITensor *in, ITensor *out, const Window &window);
+template void neon_logits_1d_max<qasymm8_signed_t>(const ITensor *in, ITensor *out, const Window &window);
+template void neon_logits_1d_max<qasymm8_t>(const ITensor *in, ITensor *out, const Window &window);
+
+template <typename T>
+void neon_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, float beta, bool is_log, const Window &window)
+{
+ static_assert(std::is_same<T, qasymm8_t>::value
+ || std::is_same<T, qasymm8_signed_t>::value,
+ "quantized type should be either qasymm8_t or qasymm8_signed_t.");
+
+ const int start_x = in->info()->valid_region().anchor.x();
+ const int input_width = in->info()->valid_region().shape.x();
+
+ const float scale_beta = -beta * in->info()->quantization_info().uniform().scale;
+ const auto scale_beta_vec = vdupq_n_f32(scale_beta);
+
+ Iterator in_it(in, window);
+ Iterator max_it(max, window);
+ Iterator out_it(out, window);
+ constexpr int vec_size = 16;
+
+ execute_window_loop(window, [&](const Coordinates &)
+ {
+ /* Get pointers */
+ const auto in_ptr = reinterpret_cast<const T *>(in_it.ptr()) + start_x;
+ const auto out_ptr = reinterpret_cast<T *>(out_it.ptr()) + start_x;
+ const auto tmp_ptr = reinterpret_cast<float *>(tmp);
+
+ float sum{};
+ float sum_inversed{};
+
+ /* Compute exponentials and sum */
+ {
+ /* Get max value */
+ const auto max_val = *reinterpret_cast<const T *>(max_it.ptr());
+ const auto vec_max = wrapper::vdup_n(max_val, wrapper::traits::vector_128_tag{});
+
+ /* Init sum to zero */
+ float32x4x4_t vec_sum =
+ {
+ vdupq_n_f32(0.f),
+ vdupq_n_f32(0.f),
+ vdupq_n_f32(0.f),
+ vdupq_n_f32(0.f),
+ };
+
+ /* Loop over row and compute exponentials and sum */
+ int x = 0;
+ for(; x <= (input_width - vec_size); x += vec_size)
+ {
+ auto vec_elements = wrapper::vloadq(in_ptr + x);
+ vec_elements = wrapper::vqsub(vec_max, vec_elements);
+ auto vec_elements_flt = convert_int_to_float<float32x4x4_t>(vec_elements);
+
+ if(is_log)
+ {
+ vec_elements_flt.val[0] = vmulq_f32(vec_elements_flt.val[0], scale_beta_vec);
+ vec_elements_flt.val[1] = vmulq_f32(vec_elements_flt.val[1], scale_beta_vec);
+ vec_elements_flt.val[2] = vmulq_f32(vec_elements_flt.val[2], scale_beta_vec);
+ vec_elements_flt.val[3] = vmulq_f32(vec_elements_flt.val[3], scale_beta_vec);
+ vec_sum.val[0] = vaddq_f32(vec_sum.val[0], vexpq_f32(vec_elements_flt.val[0]));
+ vec_sum.val[1] = vaddq_f32(vec_sum.val[1], vexpq_f32(vec_elements_flt.val[1]));
+ vec_sum.val[2] = vaddq_f32(vec_sum.val[2], vexpq_f32(vec_elements_flt.val[2]));
+ vec_sum.val[3] = vaddq_f32(vec_sum.val[3], vexpq_f32(vec_elements_flt.val[3]));
+ }
+ else
+ {
+ vec_elements_flt.val[0] = vexpq_f32(vmulq_f32(vec_elements_flt.val[0], scale_beta_vec));
+ vec_elements_flt.val[1] = vexpq_f32(vmulq_f32(vec_elements_flt.val[1], scale_beta_vec));
+ vec_elements_flt.val[2] = vexpq_f32(vmulq_f32(vec_elements_flt.val[2], scale_beta_vec));
+ vec_elements_flt.val[3] = vexpq_f32(vmulq_f32(vec_elements_flt.val[3], scale_beta_vec));
+ vec_sum.val[0] = vaddq_f32(vec_sum.val[0], vec_elements_flt.val[0]);
+ vec_sum.val[1] = vaddq_f32(vec_sum.val[1], vec_elements_flt.val[1]);
+ vec_sum.val[2] = vaddq_f32(vec_sum.val[2], vec_elements_flt.val[2]);
+ vec_sum.val[3] = vaddq_f32(vec_sum.val[3], vec_elements_flt.val[3]);
+ }
+
+ vst4q_f32(tmp_ptr + x, vec_elements_flt);
+ }
+
+ /* Reduce sum */
+ const auto sum_16_byte = vaddq_f32(vaddq_f32(vec_sum.val[0], vec_sum.val[1]), vaddq_f32(vec_sum.val[2], vec_sum.val[3]));
+ auto sum_res = vpadd_f32(vget_high_f32(sum_16_byte), vget_low_f32(sum_16_byte));
+ sum_res = vpadd_f32(sum_res, sum_res);
+ sum = wrapper::vgetlane(sum_res, 0);
+
+ /* Run remaining elements */
+ for(; x < input_width; ++x)
+ {
+ float element{};
+ if(is_log)
+ {
+ element = (max_val - in_ptr[x]) * scale_beta;
+ sum += std::exp(element);
+ }
+ else
+ {
+ element = std::exp((max_val - in_ptr[x]) * scale_beta);
+ sum += element;
+ }
+
+ tmp_ptr[x] = element;
+ }
+
+ if(!is_log)
+ {
+ sum_inversed = 256.f / sum;
+ }
+ else
+ {
+ sum = std::log(sum);
+ }
+ }
+
+ /* Normalize exponentials */
+ {
+ constexpr bool is_qasymm8_signed = std::is_same<T, qasymm8_signed_t>::value;
+ /* Loop over row and compute softmax */
+ int x = 0;
+ for(; x <= (input_width - vec_size); x += vec_size)
+ {
+ using int_vec_type = wrapper::traits::neon_vector_t<T, 16>;
+ float32x4x4_t vec_in = vld4q_f32(tmp_ptr + x);
+ int_vec_type normalized_value{};
+ if(is_log)
+ {
+ const float32x4x4_t sub =
+ {
+ vsubq_f32(vec_in.val[0], vdupq_n_f32(sum)),
+ vsubq_f32(vec_in.val[1], vdupq_n_f32(sum)),
+ vsubq_f32(vec_in.val[2], vdupq_n_f32(sum)),
+ vsubq_f32(vec_in.val[3], vdupq_n_f32(sum)),
+ };
+ normalized_value = convert_float_to_int<float32x4x4_t, int_vec_type>(sub);
+ }
+ else
+ {
+ float32x4x4_t mul =
+ {
+ vmulq_f32(vec_in.val[0], vdupq_n_f32(sum_inversed)),
+ vmulq_f32(vec_in.val[1], vdupq_n_f32(sum_inversed)),
+ vmulq_f32(vec_in.val[2], vdupq_n_f32(sum_inversed)),
+ vmulq_f32(vec_in.val[3], vdupq_n_f32(sum_inversed)),
+ };
+
+ if(is_qasymm8_signed)
+ {
+ const auto offset_vec = wrapper::vdup_n(128.f, wrapper::traits::vector_128_tag{});
+ mul.val[0] = wrapper::vsub(mul.val[0], offset_vec);
+ mul.val[1] = wrapper::vsub(mul.val[1], offset_vec);
+ mul.val[2] = wrapper::vsub(mul.val[2], offset_vec);
+ mul.val[3] = wrapper::vsub(mul.val[3], offset_vec);
+ }
+
+ normalized_value = convert_float_to_int<float32x4x4_t, int_vec_type>(mul);
+ }
+ wrapper::vstore(out_ptr + x, normalized_value);
+ }
+ /* Run remaining elements */
+ for(; x < input_width; ++x)
+ {
+ if(is_log)
+ {
+ out_ptr[x] = utils::cast::saturate_cast<T>(tmp_ptr[x] - sum);
+ }
+ else
+ {
+ out_ptr[x] = utils::cast::saturate_cast<T>((tmp_ptr[x] * sum_inversed) - (is_qasymm8_signed ? 128.f : 0));
+ }
+ }
+ }
+ },
+ in_it, max_it, out_it);
+}
+
+template void neon_softmax_logits_1d_quantized<qasymm8_signed_t>(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, float beta, bool is_log, const Window &window);
+template void neon_softmax_logits_1d_quantized<qasymm8_t>(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, float beta, bool is_log, const Window &window);
+template <typename T>
+void neon_softmax_logits_1d_float(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, const float beta, bool is_log, const Window &window)
+{
+ const int start_x = in->info()->valid_region().anchor.x();
+ const int input_width = in->info()->valid_region().shape.x();
+
+ Iterator in_it(in, window);
+ Iterator max_it(max, window);
+ Iterator out_it(out, window);
+
+ /** SIMD vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+ constexpr int vec_size = 16 / sizeof(T);
+ const int sum_stages = log2(vec_size / 2);
+
+ execute_window_loop(window, [&](const Coordinates &)
+ {
+ /* Get pointers */
+ const auto in_ptr = reinterpret_cast<const T *>(in_it.ptr()) + start_x;
+ const auto out_ptr = reinterpret_cast<T *>(out_it.ptr()) + start_x;
+ const auto tmp_ptr = reinterpret_cast<T *>(tmp);
+
+ T sum{};
+ T sum_inversed{};
+
+ /* Compute exponentials and sum */
+ {
+ /* Get max value */
+ const auto max_val = *reinterpret_cast<const T *>(max_it.ptr());
+ const auto vec_max = wrapper::vdup_n(max_val, ExactTagType{});
+
+ /* Init sum to zero */
+ auto vec_sum = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
+
+ /* Loop over row and compute exponentials and sum */
+ int x = 0;
+ for(; x <= (input_width - vec_size); x += vec_size)
+ {
+ auto vec_elements = wrapper::vloadq(in_ptr + x);
+ vec_elements = wrapper::vsub(vec_elements, vec_max);
+ if(is_log)
+ {
+ vec_elements = wrapper::vmul(vec_elements, wrapper::vdup_n(static_cast<T>(beta), ExactTagType{}));
+ vec_sum = wrapper::vadd(vec_sum, wrapper::vexpq(vec_elements));
+ }
+ else
+ {
+ vec_elements = wrapper::vexpq(wrapper::vmul(vec_elements, wrapper::vdup_n(static_cast<T>(beta), ExactTagType{})));
+ vec_sum = wrapper::vadd(vec_sum, vec_elements);
+ }
+ wrapper::vstore(tmp_ptr + x, vec_elements);
+ }
+
+ /* Reduce sum */
+ auto sum_res = wrapper::vpadd(wrapper::vgethigh(vec_sum), wrapper::vgetlow(vec_sum));
+ for(int i = 0; i < sum_stages; ++i)
+ {
+ sum_res = wrapper::vpadd(sum_res, sum_res);
+ }
+ sum = wrapper::vgetlane(sum_res, 0);
+
+ /* Run remaining elements */
+ for(; x < input_width; ++x)
+ {
+ T element{};
+
+ if(is_log)
+ {
+ element = (in_ptr[x] - max_val) * beta;
+ sum += std::exp(element);
+ }
+ else
+ {
+ element = std::exp((in_ptr[x] - max_val) * beta);
+ sum += element;
+ }
+ tmp_ptr[x] = element;
+ }
+
+ if(!is_log)
+ {
+ sum_inversed = T(1) / sum;
+ }
+ else
+ {
+ sum = static_cast<T>(std::log(sum));
+ }
+ }
+
+ /* Normalize exponentials */
+ {
+ /* Loop over row and compute softmax */
+ int x = 0;
+ for(; x <= (input_width - vec_size); x += vec_size)
+ {
+ auto vec_in = wrapper::vloadq(tmp_ptr + x);
+ auto normalized_value = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
+ if(is_log)
+ {
+ normalized_value = wrapper::vsub(vec_in, wrapper::vdup_n(static_cast<T>(sum), ExactTagType{}));
+ }
+ else
+ {
+ normalized_value = wrapper::vmul(vec_in, wrapper::vdup_n(static_cast<T>(sum_inversed), ExactTagType{}));
+ }
+ wrapper::vstore(out_ptr + x, normalized_value);
+ }
+ /* Run remaining elements */
+ for(; x < input_width; ++x)
+ {
+ if(is_log)
+ {
+ out_ptr[x] = tmp_ptr[x] - sum;
+ }
+ else
+ {
+ out_ptr[x] = tmp_ptr[x] * sum_inversed;
+ }
+ }
+ }
+ },
+ in_it, max_it, out_it);
+}
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+template void neon_softmax_logits_1d_float<float16_t>(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, const float beta, bool is_log, const Window &window);
+#endif //defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+template void neon_softmax_logits_1d_float<float>(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, const float beta, bool is_log, const Window &window);
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/neon/impl.h b/src/cpu/kernels/softmax/generic/neon/impl.h
index 325e127f3b..6ca659919a 100644
--- a/src/cpu/kernels/softmax/generic/neon/impl.h
+++ b/src/cpu/kernels/softmax/generic/neon/impl.h
@@ -24,363 +24,22 @@
#ifndef SRC_CORE_NEON_KERNELS_SOFTMAX_IMPL_H
#define SRC_CORE_NEON_KERNELS_SOFTMAX_IMPL_H
-#include "src/core/NEON/NEMath.h"
-#include "src/core/NEON/wrapper/wrapper.h"
-#include "support/SaturateCast.h"
+#include "arm_compute/core/Helpers.h"
namespace arm_compute
{
namespace cpu
{
template <typename T>
-void neon_logits_1d_max(const ITensor *in, ITensor *out, const Window &window)
-{
- /** SIMD vector tag type. */
- using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
-
- constexpr int window_step_x = 16 / sizeof(T);
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- Window win{ window };
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
- Iterator input(in, win);
- Iterator output(out, win);
-
- const int sum_stages = log2(window_step_x / 2);
- execute_window_loop(win, [&](const Coordinates &)
- {
- // Get pointers
- const auto in_ptr = reinterpret_cast<const T *>(input.ptr());
- const auto out_ptr = reinterpret_cast<T *>(output.ptr());
-
- // Init max value
- auto vec_max = wrapper::vdup_n(support::cpp11::lowest<T>(), ExactTagType{});
- int x = window_start_x;
-
- for(; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto current_value = wrapper::vloadq(in_ptr + x);
- vec_max = wrapper::vmax(vec_max, current_value);
- }
- auto carry_max = wrapper::vpmax(wrapper::vgethigh(vec_max), wrapper::vgetlow(vec_max));
-
- for(int i = 0; i < sum_stages; ++i)
- {
- carry_max = wrapper::vpmax(carry_max, carry_max);
- }
- T max_val = wrapper::vgetlane(carry_max, 0);
-
- // Compute left-over elements
- for(; x < window_end_x; ++x)
- {
- max_val = *(in_ptr + x) > max_val ? *(in_ptr + x) : max_val;
- }
-
- *out_ptr = max_val;
- },
- input, output);
-}
+void neon_logits_1d_max(const ITensor *in, ITensor *out, const Window &window);
template <typename T>
void neon_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
- ITensor *out, float beta, bool is_log, const Window &window)
-{
- static_assert(std::is_same<T, qasymm8_t>::value
- || std::is_same<T, qasymm8_signed_t>::value,
- "quantized type should be either qasymm8_t or qasymm8_signed_t.");
-
- const int start_x = in->info()->valid_region().anchor.x();
- const int input_width = in->info()->valid_region().shape.x();
-
- const float scale_beta = -beta * in->info()->quantization_info().uniform().scale;
- const auto scale_beta_vec = vdupq_n_f32(scale_beta);
-
- Iterator in_it(in, window);
- Iterator max_it(max, window);
- Iterator out_it(out, window);
- constexpr int vec_size = 16;
-
- execute_window_loop(window, [&](const Coordinates &)
- {
- /* Get pointers */
- const auto in_ptr = reinterpret_cast<const T *>(in_it.ptr()) + start_x;
- const auto out_ptr = reinterpret_cast<T *>(out_it.ptr()) + start_x;
- const auto tmp_ptr = reinterpret_cast<float *>(tmp);
-
- float sum{};
- float sum_inversed{};
-
- /* Compute exponentials and sum */
- {
- /* Get max value */
- const auto max_val = *reinterpret_cast<const T *>(max_it.ptr());
- const auto vec_max = wrapper::vdup_n(max_val, wrapper::traits::vector_128_tag{});
-
- /* Init sum to zero */
- float32x4x4_t vec_sum =
- {
- vdupq_n_f32(0.f),
- vdupq_n_f32(0.f),
- vdupq_n_f32(0.f),
- vdupq_n_f32(0.f),
- };
-
- /* Loop over row and compute exponentials and sum */
- int x = 0;
- for(; x <= (input_width - vec_size); x += vec_size)
- {
- auto vec_elements = wrapper::vloadq(in_ptr + x);
- vec_elements = wrapper::vqsub(vec_max, vec_elements);
- auto vec_elements_flt = convert_int_to_float<float32x4x4_t>(vec_elements);
-
- if(is_log)
- {
- vec_elements_flt.val[0] = vmulq_f32(vec_elements_flt.val[0], scale_beta_vec);
- vec_elements_flt.val[1] = vmulq_f32(vec_elements_flt.val[1], scale_beta_vec);
- vec_elements_flt.val[2] = vmulq_f32(vec_elements_flt.val[2], scale_beta_vec);
- vec_elements_flt.val[3] = vmulq_f32(vec_elements_flt.val[3], scale_beta_vec);
- vec_sum.val[0] = vaddq_f32(vec_sum.val[0], vexpq_f32(vec_elements_flt.val[0]));
- vec_sum.val[1] = vaddq_f32(vec_sum.val[1], vexpq_f32(vec_elements_flt.val[1]));
- vec_sum.val[2] = vaddq_f32(vec_sum.val[2], vexpq_f32(vec_elements_flt.val[2]));
- vec_sum.val[3] = vaddq_f32(vec_sum.val[3], vexpq_f32(vec_elements_flt.val[3]));
- }
- else
- {
- vec_elements_flt.val[0] = vexpq_f32(vmulq_f32(vec_elements_flt.val[0], scale_beta_vec));
- vec_elements_flt.val[1] = vexpq_f32(vmulq_f32(vec_elements_flt.val[1], scale_beta_vec));
- vec_elements_flt.val[2] = vexpq_f32(vmulq_f32(vec_elements_flt.val[2], scale_beta_vec));
- vec_elements_flt.val[3] = vexpq_f32(vmulq_f32(vec_elements_flt.val[3], scale_beta_vec));
- vec_sum.val[0] = vaddq_f32(vec_sum.val[0], vec_elements_flt.val[0]);
- vec_sum.val[1] = vaddq_f32(vec_sum.val[1], vec_elements_flt.val[1]);
- vec_sum.val[2] = vaddq_f32(vec_sum.val[2], vec_elements_flt.val[2]);
- vec_sum.val[3] = vaddq_f32(vec_sum.val[3], vec_elements_flt.val[3]);
- }
-
- vst4q_f32(tmp_ptr + x, vec_elements_flt);
- }
-
- /* Reduce sum */
- const auto sum_16_byte = vaddq_f32(vaddq_f32(vec_sum.val[0], vec_sum.val[1]), vaddq_f32(vec_sum.val[2], vec_sum.val[3]));
- auto sum_res = vpadd_f32(vget_high_f32(sum_16_byte), vget_low_f32(sum_16_byte));
- sum_res = vpadd_f32(sum_res, sum_res);
- sum = wrapper::vgetlane(sum_res, 0);
-
- /* Run remaining elements */
- for(; x < input_width; ++x)
- {
- float element{};
- if(is_log)
- {
- element = (max_val - in_ptr[x]) * scale_beta;
- sum += std::exp(element);
- }
- else
- {
- element = std::exp((max_val - in_ptr[x]) * scale_beta);
- sum += element;
- }
-
- tmp_ptr[x] = element;
- }
-
- if(!is_log)
- {
- sum_inversed = 256.f / sum;
- }
- else
- {
- sum = std::log(sum);
- }
- }
-
- /* Normalize exponentials */
- {
- constexpr bool is_qasymm8_signed = std::is_same<T, qasymm8_signed_t>::value;
- /* Loop over row and compute softmax */
- int x = 0;
- for(; x <= (input_width - vec_size); x += vec_size)
- {
- using int_vec_type = wrapper::traits::neon_vector_t<T, 16>;
- float32x4x4_t vec_in = vld4q_f32(tmp_ptr + x);
- int_vec_type normalized_value{};
- if(is_log)
- {
- const float32x4x4_t sub =
- {
- vsubq_f32(vec_in.val[0], vdupq_n_f32(sum)),
- vsubq_f32(vec_in.val[1], vdupq_n_f32(sum)),
- vsubq_f32(vec_in.val[2], vdupq_n_f32(sum)),
- vsubq_f32(vec_in.val[3], vdupq_n_f32(sum)),
- };
- normalized_value = convert_float_to_int<float32x4x4_t, int_vec_type>(sub);
- }
- else
- {
- float32x4x4_t mul =
- {
- vmulq_f32(vec_in.val[0], vdupq_n_f32(sum_inversed)),
- vmulq_f32(vec_in.val[1], vdupq_n_f32(sum_inversed)),
- vmulq_f32(vec_in.val[2], vdupq_n_f32(sum_inversed)),
- vmulq_f32(vec_in.val[3], vdupq_n_f32(sum_inversed)),
- };
-
- if(is_qasymm8_signed)
- {
- const auto offset_vec = wrapper::vdup_n(128.f, wrapper::traits::vector_128_tag{});
- mul.val[0] = wrapper::vsub(mul.val[0], offset_vec);
- mul.val[1] = wrapper::vsub(mul.val[1], offset_vec);
- mul.val[2] = wrapper::vsub(mul.val[2], offset_vec);
- mul.val[3] = wrapper::vsub(mul.val[3], offset_vec);
- }
-
- normalized_value = convert_float_to_int<float32x4x4_t, int_vec_type>(mul);
- }
- wrapper::vstore(out_ptr + x, normalized_value);
- }
- /* Run remaining elements */
- for(; x < input_width; ++x)
- {
- if(is_log)
- {
- out_ptr[x] = utils::cast::saturate_cast<T>(tmp_ptr[x] - sum);
- }
- else
- {
- out_ptr[x] = utils::cast::saturate_cast<T>((tmp_ptr[x] * sum_inversed) - (is_qasymm8_signed ? 128.f : 0));
- }
- }
- }
- },
- in_it, max_it, out_it);
-}
+ ITensor *out, float beta, bool is_log, const Window &window);
template <typename T>
void neon_softmax_logits_1d_float(const ITensor *in, const ITensor *max, void *const tmp,
- ITensor *out, const float beta, bool is_log, const Window &window)
-{
- const int start_x = in->info()->valid_region().anchor.x();
- const int input_width = in->info()->valid_region().shape.x();
-
- Iterator in_it(in, window);
- Iterator max_it(max, window);
- Iterator out_it(out, window);
-
- /** SIMD vector tag type. */
- using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
-
- constexpr int vec_size = 16 / sizeof(T);
- const int sum_stages = log2(vec_size / 2);
-
- execute_window_loop(window, [&](const Coordinates &)
- {
- /* Get pointers */
- const auto in_ptr = reinterpret_cast<const T *>(in_it.ptr()) + start_x;
- const auto out_ptr = reinterpret_cast<T *>(out_it.ptr()) + start_x;
- const auto tmp_ptr = reinterpret_cast<T *>(tmp);
-
- T sum{};
- T sum_inversed{};
-
- /* Compute exponentials and sum */
- {
- /* Get max value */
- const auto max_val = *reinterpret_cast<const T *>(max_it.ptr());
- const auto vec_max = wrapper::vdup_n(max_val, ExactTagType{});
-
- /* Init sum to zero */
- auto vec_sum = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
-
- /* Loop over row and compute exponentials and sum */
- int x = 0;
- for(; x <= (input_width - vec_size); x += vec_size)
- {
- auto vec_elements = wrapper::vloadq(in_ptr + x);
- vec_elements = wrapper::vsub(vec_elements, vec_max);
- if(is_log)
- {
- vec_elements = wrapper::vmul(vec_elements, wrapper::vdup_n(static_cast<T>(beta), ExactTagType{}));
- vec_sum = wrapper::vadd(vec_sum, wrapper::vexpq(vec_elements));
- }
- else
- {
- vec_elements = wrapper::vexpq(wrapper::vmul(vec_elements, wrapper::vdup_n(static_cast<T>(beta), ExactTagType{})));
- vec_sum = wrapper::vadd(vec_sum, vec_elements);
- }
- wrapper::vstore(tmp_ptr + x, vec_elements);
- }
-
- /* Reduce sum */
- auto sum_res = wrapper::vpadd(wrapper::vgethigh(vec_sum), wrapper::vgetlow(vec_sum));
- for(int i = 0; i < sum_stages; ++i)
- {
- sum_res = wrapper::vpadd(sum_res, sum_res);
- }
- sum = wrapper::vgetlane(sum_res, 0);
-
- /* Run remaining elements */
- for(; x < input_width; ++x)
- {
- T element{};
-
- if(is_log)
- {
- element = (in_ptr[x] - max_val) * beta;
- sum += std::exp(element);
- }
- else
- {
- element = std::exp((in_ptr[x] - max_val) * beta);
- sum += element;
- }
- tmp_ptr[x] = element;
- }
-
- if(!is_log)
- {
- sum_inversed = T(1) / sum;
- }
- else
- {
- sum = static_cast<T>(std::log(sum));
- }
- }
-
- /* Normalize exponentials */
- {
- /* Loop over row and compute softmax */
- int x = 0;
- for(; x <= (input_width - vec_size); x += vec_size)
- {
- auto vec_in = wrapper::vloadq(tmp_ptr + x);
- auto normalized_value = wrapper::vdup_n(static_cast<T>(0), ExactTagType{});
- if(is_log)
- {
- normalized_value = wrapper::vsub(vec_in, wrapper::vdup_n(static_cast<T>(sum), ExactTagType{}));
- }
- else
- {
- normalized_value = wrapper::vmul(vec_in, wrapper::vdup_n(static_cast<T>(sum_inversed), ExactTagType{}));
- }
- wrapper::vstore(out_ptr + x, normalized_value);
- }
- /* Run remaining elements */
- for(; x < input_width; ++x)
- {
- if(is_log)
- {
- out_ptr[x] = tmp_ptr[x] - sum;
- }
- else
- {
- out_ptr[x] = tmp_ptr[x] * sum_inversed;
- }
- }
- }
- },
- in_it, max_it, out_it);
-}
-
+ ITensor *out, const float beta, bool is_log, const Window &window);
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/sve2/impl.cpp b/src/cpu/kernels/softmax/generic/sve2/impl.cpp
new file mode 100644
index 0000000000..9cdfe61446
--- /dev/null
+++ b/src/cpu/kernels/softmax/generic/sve2/impl.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2021-2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/softmax/generic/sve2/impl.h"
+#include "arm_compute/core/Types.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+template <typename ScalarType>
+void sve2_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, float beta, bool is_log, const Window &window)
+{
+ const int start_x = in->info()->valid_region().anchor.x();
+ const int input_width = in->info()->valid_region().shape.x();
+
+ const float scale_beta = -beta * in->info()->quantization_info().uniform().scale;
+ const auto scale_beta_vec = svdup_n_f32(scale_beta);
+
+ Iterator in_it(in, window);
+ Iterator max_it(max, window);
+ Iterator out_it(out, window);
+ const auto all_true_pg = wrapper::svptrue<ScalarType>();
+ using SVEType = typename wrapper::traits::sve_vector<ScalarType>::type;
+
+ const int inc_1 = static_cast<int>(svcntw());
+ const int inc_2 = static_cast<int>(2 * svcntw());
+ const int inc_3 = static_cast<int>(3 * svcntw());
+
+ execute_window_loop(window, [&](const Coordinates &)
+ {
+ /* Get pointers */
+ const auto in_ptr = reinterpret_cast<const ScalarType *>(in_it.ptr()) + start_x;
+ const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr()) + start_x;
+ const auto tmp_ptr = reinterpret_cast<float *>(tmp);
+
+ float sum{};
+
+ /* Compute exponentials and sum */
+ {
+ /* Get max value */
+ const auto max_val = *reinterpret_cast<const ScalarType *>(max_it.ptr());
+ const auto vec_max = wrapper::svdup_n(max_val);
+
+ /* Init sum to zero */
+ auto vec_sum_0 = svdup_n_f32(0.f);
+ auto vec_sum_1 = svdup_n_f32(0.f);
+ auto vec_sum_2 = svdup_n_f32(0.f);
+ auto vec_sum_3 = svdup_n_f32(0.f);
+
+ /* Loop over row and compute exponentials and sum */
+ int x = 0;
+ svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+ svbool_t pg_0 = svunpklo(svunpklo(pg));
+ svbool_t pg_1 = svunpkhi(svunpklo(pg));
+ svbool_t pg_2 = svunpklo(svunpkhi(pg));
+ svbool_t pg_3 = svunpkhi(svunpkhi(pg));
+ do
+ {
+ auto vec_elements = svld1(pg, in_ptr + x);
+ vec_elements = svsub_z(pg, vec_max, vec_elements);
+
+ auto vec_elements_flt_0 = svcvt_f32_z(pg_0, svunpklo(svunpklo(vec_elements)));
+ auto vec_elements_flt_1 = svcvt_f32_z(pg_1, svunpkhi(svunpklo(vec_elements)));
+ auto vec_elements_flt_2 = svcvt_f32_z(pg_2, svunpklo(svunpkhi(vec_elements)));
+ auto vec_elements_flt_3 = svcvt_f32_z(pg_3, svunpkhi(svunpkhi(vec_elements)));
+
+ if(is_log)
+ {
+ vec_elements_flt_0 = svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec);
+ vec_elements_flt_1 = svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec);
+ vec_elements_flt_2 = svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec);
+ vec_elements_flt_3 = svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec);
+ vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, svexp_f32_z(pg_0, vec_elements_flt_0));
+ vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, svexp_f32_z(pg_1, vec_elements_flt_1));
+ vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, svexp_f32_z(pg_2, vec_elements_flt_2));
+ vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, svexp_f32_z(pg_3, vec_elements_flt_3));
+ }
+ else
+ {
+ vec_elements_flt_0 = svexp_f32_z(pg_0, svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec));
+ vec_elements_flt_1 = svexp_f32_z(pg_1, svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec));
+ vec_elements_flt_2 = svexp_f32_z(pg_2, svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec));
+ vec_elements_flt_3 = svexp_f32_z(pg_3, svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec));
+ vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, vec_elements_flt_0);
+ vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, vec_elements_flt_1);
+ vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, vec_elements_flt_2);
+ vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, vec_elements_flt_3);
+ }
+
+ svst1_f32(pg_0, tmp_ptr + x, vec_elements_flt_0);
+ svst1_f32(pg_1, tmp_ptr + x + inc_1, vec_elements_flt_1);
+ svst1_f32(pg_2, tmp_ptr + x + inc_2, vec_elements_flt_2);
+ svst1_f32(pg_3, tmp_ptr + x + inc_3, vec_elements_flt_3);
+
+ x += wrapper::svcnt<ScalarType>();
+ pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+ pg_0 = svunpklo(svunpklo(pg));
+ pg_1 = svunpkhi(svunpklo(pg));
+ pg_2 = svunpklo(svunpkhi(pg));
+ pg_3 = svunpkhi(svunpkhi(pg));
+ }
+ while(svptest_any(all_true_pg, pg));
+
+ /* Reduce sum */
+ const auto vec_sum = svadd_f32_z(all_true_pg, svadd_f32_z(all_true_pg, vec_sum_0, vec_sum_1), svadd_f32_z(all_true_pg, vec_sum_2, vec_sum_3));
+ sum = svaddv_f32(all_true_pg, vec_sum);
+
+ /* Run remaining elements */
+ x = 0;
+ if(is_log)
+ {
+ sum = std::log(sum);
+ }
+ else
+ {
+ sum = 256.f / sum;
+ }
+ }
+
+ /* Normalize exponentials */
+ {
+ constexpr bool is_qasymm8_signed = std::is_same<ScalarType, qasymm8_signed_t>::value;
+ /* Loop over row and compute softmax */
+ int x = 0;
+ svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+ svbool_t pg_0 = svunpklo(svunpklo(pg));
+ svbool_t pg_1 = svunpkhi(svunpklo(pg));
+ svbool_t pg_2 = svunpklo(svunpkhi(pg));
+ svbool_t pg_3 = svunpkhi(svunpkhi(pg));
+ do
+ {
+ auto vec_in_0 = svld1_f32(pg_0, tmp_ptr + x);
+ auto vec_in_1 = svld1_f32(pg_1, tmp_ptr + x + inc_1);
+ auto vec_in_2 = svld1_f32(pg_2, tmp_ptr + x + inc_2);
+ auto vec_in_3 = svld1_f32(pg_3, tmp_ptr + x + inc_3);
+
+ svfloat32_t res_0{};
+ svfloat32_t res_1{};
+ svfloat32_t res_2{};
+ svfloat32_t res_3{};
+
+ if(is_log)
+ {
+ res_0 = svsub_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
+ res_1 = svsub_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
+ res_2 = svsub_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
+ res_3 = svsub_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
+ }
+ else
+ {
+ res_0 = svmul_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
+ res_1 = svmul_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
+ res_2 = svmul_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
+ res_3 = svmul_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
+
+ if(is_qasymm8_signed)
+ {
+ const auto offset_vec = svdup_n_f32(128.f);
+ res_0 = svsub_z(pg_0, vec_in_0, offset_vec);
+ res_1 = svsub_z(pg_1, vec_in_1, offset_vec);
+ res_2 = svsub_z(pg_2, vec_in_2, offset_vec);
+ res_3 = svsub_z(pg_3, vec_in_3, offset_vec);
+ }
+ }
+
+ // Store value
+ const auto out = convert_float_to_int<SVEType>(res_0, res_1, res_2, res_3);
+ svst1(pg, out_ptr + x, out);
+ x += wrapper::svcnt<ScalarType>();
+ pg = wrapper::svwhilelt<ScalarType>(x, input_width);
+ pg_0 = svunpklo(svunpklo(pg));
+ pg_1 = svunpkhi(svunpklo(pg));
+ pg_2 = svunpklo(svunpkhi(pg));
+ pg_3 = svunpkhi(svunpkhi(pg));
+ }
+ while(svptest_any(all_true_pg, pg));
+ }
+ },
+ in_it, max_it, out_it);
+}
+
+template void sve2_softmax_logits_1d_quantized<qasymm8_signed_t>(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, float beta, bool is_log, const Window &window);
+template void sve2_softmax_logits_1d_quantized<qasymm8_t>(const ITensor *in, const ITensor *max, void *const tmp,
+ ITensor *out, float beta, bool is_log, const Window &window);
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/sve2/impl.h b/src/cpu/kernels/softmax/generic/sve2/impl.h
index 16dde2b115..abbcc15181 100644
--- a/src/cpu/kernels/softmax/generic/sve2/impl.h
+++ b/src/cpu/kernels/softmax/generic/sve2/impl.h
@@ -24,9 +24,7 @@
#ifndef SRC_CORE_SVE2_KERNELS_SOFTMAX_IMPL_H
#define SRC_CORE_SVE2_KERNELS_SOFTMAX_IMPL_H
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
-#include "arm_compute/core/Types.h"
-#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+#include "arm_compute/core/Helpers.h"
namespace arm_compute
{
@@ -34,177 +32,7 @@ namespace cpu
{
template <typename ScalarType>
void sve2_softmax_logits_1d_quantized(const ITensor *in, const ITensor *max, void *const tmp,
- ITensor *out, float beta, bool is_log, const Window &window)
-{
- const int start_x = in->info()->valid_region().anchor.x();
- const int input_width = in->info()->valid_region().shape.x();
-
- const float scale_beta = -beta * in->info()->quantization_info().uniform().scale;
- const auto scale_beta_vec = svdup_n_f32(scale_beta);
-
- Iterator in_it(in, window);
- Iterator max_it(max, window);
- Iterator out_it(out, window);
- const auto all_true_pg = wrapper::svptrue<ScalarType>();
- using SVEType = typename wrapper::traits::sve_vector<ScalarType>::type;
-
- const int inc_1 = static_cast<int>(svcntw());
- const int inc_2 = static_cast<int>(2 * svcntw());
- const int inc_3 = static_cast<int>(3 * svcntw());
-
- execute_window_loop(window, [&](const Coordinates &)
- {
- /* Get pointers */
- const auto in_ptr = reinterpret_cast<const ScalarType *>(in_it.ptr()) + start_x;
- const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr()) + start_x;
- const auto tmp_ptr = reinterpret_cast<float *>(tmp);
-
- float sum{};
-
- /* Compute exponentials and sum */
- {
- /* Get max value */
- const auto max_val = *reinterpret_cast<const ScalarType *>(max_it.ptr());
- const auto vec_max = wrapper::svdup_n(max_val);
-
- /* Init sum to zero */
- auto vec_sum_0 = svdup_n_f32(0.f);
- auto vec_sum_1 = svdup_n_f32(0.f);
- auto vec_sum_2 = svdup_n_f32(0.f);
- auto vec_sum_3 = svdup_n_f32(0.f);
-
- /* Loop over row and compute exponentials and sum */
- int x = 0;
- svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
- svbool_t pg_0 = svunpklo(svunpklo(pg));
- svbool_t pg_1 = svunpkhi(svunpklo(pg));
- svbool_t pg_2 = svunpklo(svunpkhi(pg));
- svbool_t pg_3 = svunpkhi(svunpkhi(pg));
- do
- {
- auto vec_elements = svld1(pg, in_ptr + x);
- vec_elements = svsub_z(pg, vec_max, vec_elements);
-
- auto vec_elements_flt_0 = svcvt_f32_z(pg_0, svunpklo(svunpklo(vec_elements)));
- auto vec_elements_flt_1 = svcvt_f32_z(pg_1, svunpkhi(svunpklo(vec_elements)));
- auto vec_elements_flt_2 = svcvt_f32_z(pg_2, svunpklo(svunpkhi(vec_elements)));
- auto vec_elements_flt_3 = svcvt_f32_z(pg_3, svunpkhi(svunpkhi(vec_elements)));
-
- if(is_log)
- {
- vec_elements_flt_0 = svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec);
- vec_elements_flt_1 = svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec);
- vec_elements_flt_2 = svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec);
- vec_elements_flt_3 = svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec);
- vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, svexp_f32_z(pg_0, vec_elements_flt_0));
- vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, svexp_f32_z(pg_1, vec_elements_flt_1));
- vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, svexp_f32_z(pg_2, vec_elements_flt_2));
- vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, svexp_f32_z(pg_3, vec_elements_flt_3));
- }
- else
- {
- vec_elements_flt_0 = svexp_f32_z(pg_0, svmul_f32_z(pg_0, vec_elements_flt_0, scale_beta_vec));
- vec_elements_flt_1 = svexp_f32_z(pg_1, svmul_f32_z(pg_1, vec_elements_flt_1, scale_beta_vec));
- vec_elements_flt_2 = svexp_f32_z(pg_2, svmul_f32_z(pg_2, vec_elements_flt_2, scale_beta_vec));
- vec_elements_flt_3 = svexp_f32_z(pg_3, svmul_f32_z(pg_3, vec_elements_flt_3, scale_beta_vec));
- vec_sum_0 = svadd_f32_m(pg_0, vec_sum_0, vec_elements_flt_0);
- vec_sum_1 = svadd_f32_m(pg_1, vec_sum_1, vec_elements_flt_1);
- vec_sum_2 = svadd_f32_m(pg_2, vec_sum_2, vec_elements_flt_2);
- vec_sum_3 = svadd_f32_m(pg_3, vec_sum_3, vec_elements_flt_3);
- }
-
- svst1_f32(pg_0, tmp_ptr + x, vec_elements_flt_0);
- svst1_f32(pg_1, tmp_ptr + x + inc_1, vec_elements_flt_1);
- svst1_f32(pg_2, tmp_ptr + x + inc_2, vec_elements_flt_2);
- svst1_f32(pg_3, tmp_ptr + x + inc_3, vec_elements_flt_3);
-
- x += wrapper::svcnt<ScalarType>();
- pg = wrapper::svwhilelt<ScalarType>(x, input_width);
- pg_0 = svunpklo(svunpklo(pg));
- pg_1 = svunpkhi(svunpklo(pg));
- pg_2 = svunpklo(svunpkhi(pg));
- pg_3 = svunpkhi(svunpkhi(pg));
- }
- while(svptest_any(all_true_pg, pg));
-
- /* Reduce sum */
- const auto vec_sum = svadd_f32_z(all_true_pg, svadd_f32_z(all_true_pg, vec_sum_0, vec_sum_1), svadd_f32_z(all_true_pg, vec_sum_2, vec_sum_3));
- sum = svaddv_f32(all_true_pg, vec_sum);
-
- /* Run remaining elements */
- x = 0;
- if(is_log)
- {
- sum = std::log(sum);
- }
- else
- {
- sum = 256.f / sum;
- }
- }
-
- /* Normalize exponentials */
- {
- constexpr bool is_qasymm8_signed = std::is_same<ScalarType, qasymm8_signed_t>::value;
- /* Loop over row and compute softmax */
- int x = 0;
- svbool_t pg = wrapper::svwhilelt<ScalarType>(x, input_width);
- svbool_t pg_0 = svunpklo(svunpklo(pg));
- svbool_t pg_1 = svunpkhi(svunpklo(pg));
- svbool_t pg_2 = svunpklo(svunpkhi(pg));
- svbool_t pg_3 = svunpkhi(svunpkhi(pg));
- do
- {
- auto vec_in_0 = svld1_f32(pg_0, tmp_ptr + x);
- auto vec_in_1 = svld1_f32(pg_1, tmp_ptr + x + inc_1);
- auto vec_in_2 = svld1_f32(pg_2, tmp_ptr + x + inc_2);
- auto vec_in_3 = svld1_f32(pg_3, tmp_ptr + x + inc_3);
-
- svfloat32_t res_0{};
- svfloat32_t res_1{};
- svfloat32_t res_2{};
- svfloat32_t res_3{};
-
- if(is_log)
- {
- res_0 = svsub_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
- res_1 = svsub_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
- res_2 = svsub_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
- res_3 = svsub_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
- }
- else
- {
- res_0 = svmul_f32_z(pg_0, vec_in_0, svdup_n_f32(sum));
- res_1 = svmul_f32_z(pg_1, vec_in_1, svdup_n_f32(sum));
- res_2 = svmul_f32_z(pg_2, vec_in_2, svdup_n_f32(sum));
- res_3 = svmul_f32_z(pg_3, vec_in_3, svdup_n_f32(sum));
-
- if(is_qasymm8_signed)
- {
- const auto offset_vec = svdup_n_f32(128.f);
- res_0 = svsub_z(pg_0, vec_in_0, offset_vec);
- res_1 = svsub_z(pg_1, vec_in_1, offset_vec);
- res_2 = svsub_z(pg_2, vec_in_2, offset_vec);
- res_3 = svsub_z(pg_3, vec_in_3, offset_vec);
- }
- }
-
- // Store value
- const auto out = convert_float_to_int<SVEType>(res_0, res_1, res_2, res_3);
- svst1(pg, out_ptr + x, out);
- x += wrapper::svcnt<ScalarType>();
- pg = wrapper::svwhilelt<ScalarType>(x, input_width);
- pg_0 = svunpklo(svunpklo(pg));
- pg_1 = svunpkhi(svunpklo(pg));
- pg_2 = svunpklo(svunpkhi(pg));
- pg_3 = svunpkhi(svunpkhi(pg));
- }
- while(svptest_any(all_true_pg, pg));
- }
- },
- in_it, max_it, out_it);
-}
+ ITensor *out, float beta, bool is_log, const Window &window);
} // namespace cpu
} // namespace arm_compute
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
#endif /* SRC_CORE_SVE2_KERNELS_SOFTMAX_IMPL_H */