author     Viet-Hoa Do <viet-hoa.do@arm.com>   2023-03-15 14:05:06 +0000
committer  Viet-Hoa Do <viet-hoa.do@arm.com>   2023-03-29 14:03:30 +0000
commit     fd472f05dc73005a89a5e6275940ab5c9a609485 (patch)
tree       4a00f42f64f4bea72c489961aaa376665d324c60
parent     5a7d1571a2de24eefc6f1d8d22deeef9f47521ee (diff)
download   ComputeLibrary-fd472f05dc73005a89a5e6275940ab5c9a609485.tar.gz
Add quantized support for unary elementwise in CPU
* Add quantized unary elementwise in CPU using LUT.
* Widen the input data range of the test suite.
  - Fix CPU exponential function overflow/underflow range.
  - Fix saturation issue of CL round operator.

Resolves: COMPMID-5763
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I41445de2b4a33ec6b01e0ab701516c240c852d0b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9367
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Pablo Marquez Tello <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
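
The core idea behind the LUT path: a quantized 8-bit tensor can take only 256 distinct values, so any unary function can be precomputed into a 256-entry table once at configure time and applied at run time as a pure byte substitution. A minimal C++ sketch of the table construction, using hypothetical dequantize/requantize arithmetic rather than the library's actual helpers:

    #include <algorithm>
    #include <array>
    #include <cmath>
    #include <cstdint>

    // Build a 256-entry LUT for a unary op on QASYMM8 data.
    // 'scale' and 'offset' stand in for the tensor's uniform quantization info.
    template <typename UnaryOp>
    std::array<uint8_t, 256> make_lut(UnaryOp op, float scale, int32_t offset)
    {
        std::array<uint8_t, 256> lut{};
        for(int i = 0; i < 256; ++i)
        {
            const float x = (i - offset) * scale;                               // dequantize
            const float y = op(x);                                              // e.g. std::exp
            const int   q = static_cast<int>(std::lround(y / scale)) + offset;  // requantize
            lut[i]        = static_cast<uint8_t>(std::clamp(q, 0, 255));        // saturate
        }
        return lut;
    }

    // Run time then reduces to: dst[i] = lut[src[i]];
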
-rw-r--r--  Android.bp                                                   2
-rw-r--r--  filelist.json                                               23
-rw-r--r--  src/BUILD.bazel                                              4
-rw-r--r--  src/CMakeLists.txt                                           5
-rw-r--r--  src/core/CL/cl_kernels/common/elementwise_unary.cl           8
-rw-r--r--  src/core/NEON/NEMath.inl                                     6
-rw-r--r--  src/core/NEON/SVEMath.inl                                    6
-rw-r--r--  src/cpu/kernels/CpuElementwiseUnaryKernel.cpp              108
-rw-r--r--  src/cpu/kernels/CpuElementwiseUnaryKernel.h                  7
-rw-r--r--  src/cpu/kernels/activation/generic/neon/lut.cpp            386
-rw-r--r--  src/cpu/kernels/activation/generic/sve/lut.cpp             619
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp      5
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp      5
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp   5
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp       58
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp       7
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp       5
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp    5
-rw-r--r--  src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp        54
-rw-r--r--  src/cpu/kernels/elementwise_unary/list.h                     8
-rw-r--r--  src/cpu/kernels/lut/generic/neon/u8.cpp                    410
-rw-r--r--  src/cpu/kernels/lut/generic/sve/u8.cpp                     647
-rw-r--r--  src/cpu/kernels/lut/list.h                                  54
-rw-r--r--  tests/validation/NEON/ElementwiseAbsoluteValue.cpp          33
-rw-r--r--  tests/validation/NEON/ElementwiseExpLayer.cpp               34
-rw-r--r--  tests/validation/NEON/ElementwiseLog.cpp                    34
-rw-r--r--  tests/validation/NEON/ElementwiseNegation.cpp               33
-rw-r--r--  tests/validation/NEON/ElementwiseRound.cpp                  37
-rw-r--r--  tests/validation/NEON/ElementwiseRsqrtLayer.cpp             33
-rw-r--r--  tests/validation/NEON/ElementwiseSin.cpp                    34
-rw-r--r--  tests/validation/fixtures/ElementwiseUnaryFixture.h        212
-rw-r--r--  tests/validation/reference/ElementwiseUnary.cpp             30
32 files changed, 1840 insertions, 1077 deletions
diff --git a/Android.bp b/Android.bp
index 848ae1049b..5617812539 100644
--- a/Android.bp
+++ b/Android.bp
@@ -486,6 +486,7 @@ cc_library_static {
"src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp",
"src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp",
"src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp",
+ "src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp",
"src/cpu/kernels/floor/neon/fp16.cpp",
"src/cpu/kernels/floor/neon/fp32.cpp",
"src/cpu/kernels/fuse_batch_normalization/generic/fp16.cpp",
@@ -513,6 +514,7 @@ cc_library_static {
"src/cpu/kernels/l2normlayer/generic/neon/fp16.cpp",
"src/cpu/kernels/l2normlayer/generic/neon/fp32.cpp",
"src/cpu/kernels/l2normlayer/generic/neon/impl.cpp",
+ "src/cpu/kernels/lut/generic/neon/u8.cpp",
"src/cpu/kernels/maxunpool/generic/neon/fp16.cpp",
"src/cpu/kernels/maxunpool/generic/neon/fp32.cpp",
"src/cpu/kernels/maxunpool/generic/neon/impl.cpp",
diff --git a/filelist.json b/filelist.json
index f858c6a29f..1e59adfc8e 100644
--- a/filelist.json
+++ b/filelist.json
@@ -863,6 +863,7 @@
],
"operators": {
"Activation": {
+ "deps": [ "LUT" ],
"files": {
"common": [
"src/cpu/operators/CpuActivation.cpp",
@@ -1412,6 +1413,7 @@
}
},
"ElementwiseUnary":{
+ "deps": [ "LUT" ],
"files": {
"common": [
"src/cpu/operators/CpuElementwiseUnary.cpp",
@@ -1422,13 +1424,17 @@
"common":["src/cpu/kernels/elementwise_unary/generic/neon/impl.cpp"],
"integer": ["src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp"],
"fp32": ["src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp"],
- "fp16": ["src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp"]
+ "fp16": ["src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp"],
+ "qasymm8": ["src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp"],
+ "qasymm8_signed": ["src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp"]
},
"sve": {
"common": ["src/cpu/kernels/elementwise_unary/generic/sve/impl.cpp" ],
"integer": ["src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp"],
"fp32": ["src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp"],
- "fp16": ["src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp"]
+ "fp16": ["src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp"],
+ "qasymm8": ["src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp"],
+ "qasymm8_signed": ["src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp"]
}
}
},
@@ -1767,6 +1773,19 @@
]
}
},
+ "LUT": {
+ "files": {
+ "common": [],
+ "neon":{
+ "qasymm8": ["src/cpu/kernels/lut/generic/neon/u8.cpp"],
+ "qasymm8_signed": ["src/cpu/kernels/lut/generic/neon/u8.cpp"]
+ },
+ "sve": {
+ "qasymm8": ["src/cpu/kernels/lut/generic/sve/u8.cpp"],
+ "qasymm8_signed": ["src/cpu/kernels/lut/generic/sve/u8.cpp"]
+ }
+ }
+ },
"LSTM": {
"deps": [
"Activation",
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index e2b7609f03..279c52e151 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -326,6 +326,8 @@ filegroup(
"cpu/kernels/elementwise_unary/generic/sve/fp32.cpp",
"cpu/kernels/elementwise_unary/generic/sve/impl.cpp",
"cpu/kernels/elementwise_unary/generic/sve/integer.cpp",
+ "cpu/kernels/elementwise_unary/generic/sve/q8.cpp",
+ "cpu/kernels/lut/generic/sve/u8.cpp",
"cpu/kernels/scale/sve/fp16.cpp",
"cpu/kernels/scale/sve/fp32.cpp",
"cpu/kernels/scale/sve/integer.cpp",
@@ -738,6 +740,7 @@ filegroup(
"cpu/kernels/elementwise_unary/generic/neon/fp32.cpp",
"cpu/kernels/elementwise_unary/generic/neon/impl.cpp",
"cpu/kernels/elementwise_unary/generic/neon/integer.cpp",
+ "cpu/kernels/elementwise_unary/generic/neon/q8.cpp",
"cpu/kernels/floor/neon/fp16.cpp",
"cpu/kernels/floor/neon/fp32.cpp",
"cpu/kernels/fuse_batch_normalization/generic/fp16.cpp",
@@ -762,6 +765,7 @@ filegroup(
"cpu/kernels/instancenorm/generic/neon/impl.cpp",
"cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp",
"cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp",
+ "cpu/kernels/lut/generic/neon/u8.cpp",
"cpu/kernels/l2normlayer/generic/neon/fp16.cpp",
"cpu/kernels/l2normlayer/generic/neon/fp32.cpp",
"cpu/kernels/l2normlayer/generic/neon/impl.cpp",
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index ca07ebf029..92c888056e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -305,6 +305,8 @@ target_sources(
cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
cpu/kernels/elementwise_unary/generic/sve/impl.cpp
cpu/kernels/elementwise_unary/generic/sve/integer.cpp
+ cpu/kernels/elementwise_unary/generic/sve/q8.cpp
+ cpu/kernels/lut/generic/sve/u8.cpp
cpu/kernels/scale/sve/fp16.cpp
cpu/kernels/scale/sve/fp32.cpp
cpu/kernels/scale/sve/integer.cpp
@@ -730,6 +732,7 @@ target_sources(
cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
cpu/kernels/elementwise_unary/generic/neon/impl.cpp
cpu/kernels/elementwise_unary/generic/neon/integer.cpp
+ cpu/kernels/elementwise_unary/generic/neon/q8.cpp
cpu/kernels/floor/neon/fp16.cpp
cpu/kernels/floor/neon/fp32.cpp
cpu/kernels/fuse_batch_normalization/generic/fp16.cpp
@@ -754,6 +757,7 @@ target_sources(
cpu/kernels/instancenorm/generic/neon/impl.cpp
cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.cpp
cpu/kernels/internal/CpuPool2dAssemblyWrapperKernel.cpp
+ cpu/kernels/lut/generic/neon/u8.cpp
cpu/kernels/l2normlayer/generic/neon/fp16.cpp
cpu/kernels/l2normlayer/generic/neon/fp32.cpp
cpu/kernels/l2normlayer/generic/neon/impl.cpp
@@ -957,4 +961,3 @@ target_sources(
runtime/TensorAllocator.cpp
runtime/Utils.cpp
)
- \ No newline at end of file
diff --git a/src/core/CL/cl_kernels/common/elementwise_unary.cl b/src/core/CL/cl_kernels/common/elementwise_unary.cl
index eba2dbc866..81835108a3 100644
--- a/src/core/CL/cl_kernels/common/elementwise_unary.cl
+++ b/src/core/CL/cl_kernels/common/elementwise_unary.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,13 +37,13 @@
#define fabs_op(input) fabs(input)
// Calculate natural_log
#define natural_log_op(input) log(input)
-// Calculate round (Cannot use round function as it rounds halfway cases away from zero).
+// Calculate round using round to nearest even rounding mode
+#define round_op(input) rint(input)
+
#if defined(VEC_SIZE)
#define VEC_TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-#define round_op(input) CONVERT(CONVERT_SAT_ROUND(input, VEC_DATA_TYPE(int, VEC_SIZE), rte), VEC_TYPE)
#define logical_not_op(input) CONVERT(CONVERT(!input, VEC_TYPE) & ((VEC_TYPE)0x1), VEC_TYPE)
#else // defined(VEC_SIZE)
-#define round_op(input) CONVERT(CONVERT_SAT_ROUND(input, int, rte), DATA_TYPE)
#define logical_not_op(input) ((!input) & 0x1)
#endif // defined(VEC_SIZE)
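
The switch from CONVERT_SAT_ROUND to rint() is what fixes the CL round operator's saturation issue called out in the commit message: the old macro converted through a saturating int, so large inputs were clamped, while rint() stays in floating point and rounds half-way cases to even. A standalone C++ illustration of the behavioural difference (not the OpenCL kernel itself):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // std::rint() uses the current rounding mode, round-to-nearest-even by default:
        std::printf("%.1f %.1f\n", std::rint(2.5), std::rint(3.5));   // 2.0 4.0
        // std::round() rounds half-way cases away from zero instead:
        std::printf("%.1f %.1f\n", std::round(2.5), std::round(3.5)); // 3.0 4.0
        // Staying in floating point avoids the old path's saturation at INT_MAX:
        std::printf("%.1f\n", std::rint(3.0e9));                      // 3000000000.0
        return 0;
    }
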
diff --git a/src/core/NEON/NEMath.inl b/src/core/NEON/NEMath.inl
index 94bbc10ad8..8b2d1c3c37 100644
--- a/src/core/NEON/NEMath.inl
+++ b/src/core/NEON/NEMath.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2022 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -158,9 +158,9 @@ inline float32x4_t vexpq_f32(float32x4_t x)
const auto neg_ln2_lo = vreinterpretq_f32_u32(vdupq_n_u32(0xb5bfbe8e)); // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f
const auto inf = vdupq_n_f32(std::numeric_limits<float>::infinity());
- const auto max_input = vdupq_n_f32(88.7f); // Approximately ln(0x1.fffffep+127)
+ const auto max_input = vdupq_n_f32(88.37f); // Approximately ln(2^127.5)
const auto zero = vdupq_n_f32(0.f);
- const auto min_input = vdupq_n_f32(-86.6f); // Approximately ln(2^-125)
+ const auto min_input = vdupq_n_f32(-86.64f); // Approximately ln(2^-125)
// Range reduction:
// e^x = 2^n * e^r
diff --git a/src/core/NEON/SVEMath.inl b/src/core/NEON/SVEMath.inl
index 5f41e2138d..8973d0b273 100644
--- a/src/core/NEON/SVEMath.inl
+++ b/src/core/NEON/SVEMath.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022 Arm Limited.
+ * Copyright (c) 2020-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -96,9 +96,9 @@ inline svfloat32_t svexp_f32_z(svbool_t pg, svfloat32_t x)
const auto neg_ln2_lo = svreinterpret_f32_u32(svdup_n_u32(0xb5bfbe8e)); // -ln(2) from bits -20 to -42: -0x1.7f7d1cp-20f
const auto inf = svdup_n_f32(std::numeric_limits<float>::infinity());
- const auto max_input = svdup_n_f32(88.7f); // Approximately ln(0x1.fffffep+127)
+ const auto max_input = svdup_n_f32(88.37f); // Approximately ln(2^127.5)
const auto zero = svdup_n_f32(0.f);
- const auto min_input = svdup_n_f32(-86.6f); // Approximately ln(2^-125)
+ const auto min_input = svdup_n_f32(-86.64f); // Approximately ln(2^-125)
// Range reduction:
// e^x = 2^n * e^r
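
The new clamp bounds in both the NEON and SVE paths follow directly from 32-bit float range — this is the "overflow/underflow range" fix from the commit message, presumably tightening the upper bound so the rounded exponent n in the 2^n reconstruction cannot overflow. The corrected constants work out as:

    max_input =  127.5 * ln(2) ≈  127.5 * 0.6931472 ≈  88.3763
    min_input = -125.0 * ln(2) ≈ -125.0 * 0.6931472 ≈ -86.6434

so the comments ln(2^127.5) and ln(2^-125) now match the literals 88.37f and -86.64f.
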
diff --git a/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp b/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp
index 335de78aca..0adf28af63 100644
--- a/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp
+++ b/src/cpu/kernels/CpuElementwiseUnaryKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,6 +26,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "src/core/CPP/Validate.h"
#include "src/core/common/Registrars.h"
@@ -42,6 +43,72 @@ namespace kernels
{
namespace
{
+#ifdef __aarch64__
+
+std::unique_ptr<uint8_t[]> q8_prepare_lut(ElementWiseUnary op, const ITensorInfo *src, const ITensorInfo *dst)
+{
+ ARM_COMPUTE_ERROR_ON(src->data_type() != dst->data_type());
+ ARM_COMPUTE_ERROR_ON(!is_data_type_quantized(src->data_type()));
+ ARM_COMPUTE_ERROR_ON(src->element_size() != 1);
+
+ auto lut = std::unique_ptr<uint8_t[]>(new uint8_t[256]);
+ const auto is_signed = src->data_type() == DataType::QASYMM8_SIGNED;
+ const auto src_qi = src->quantization_info().uniform();
+ const auto dst_qi = dst->quantization_info().uniform();
+
+ const auto dst_min_fp = (((is_signed) ? -128 : 0) - dst_qi.offset) * dst_qi.scale;
+ const auto dst_max_fp = (((is_signed) ? 127 : 255) - dst_qi.offset) * dst_qi.scale;
+
+ for(int i = 0; i < 256; ++i)
+ {
+ const auto in = (is_signed) ? dequantize_qasymm8_signed(static_cast<int8_t>(i), src_qi) : dequantize_qasymm8(i, src_qi);
+ float result = 0;
+
+ switch(op)
+ {
+ case ElementWiseUnary::RSQRT:
+ result = 1 / sqrt(in);
+ break;
+
+ case ElementWiseUnary::EXP:
+ result = std::exp(in);
+ break;
+
+ case ElementWiseUnary::NEG:
+ result = -in;
+ break;
+
+ case ElementWiseUnary::LOG:
+ result = std::log(in);
+ break;
+
+ case ElementWiseUnary::ABS:
+ result = std::abs(in);
+ break;
+
+ case ElementWiseUnary::ROUND:
+ result = support::cpp11::nearbyint(in);
+ break;
+
+ case ElementWiseUnary::SIN:
+ result = std::sin(in);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+ }
+
+ result = utility::clamp(result, dst_min_fp, dst_max_fp);
+
+ const auto out = (is_signed) ? static_cast<uint8_t>(quantize_qasymm8_signed(result, dst_qi)) : quantize_qasymm8(result, dst_qi);
+ lut[i] = out;
+ }
+
+ return lut;
+}
+
+#endif // __aarch64__
+
static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> available_kernels =
{
{
@@ -50,7 +117,8 @@ static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> avai
{
return (data.dt == DataType::F32 && data.isa.sve);
},
- REGISTER_FP32_SVE(sve_fp32_elementwise_unary)
+ REGISTER_FP32_SVE(sve_fp32_elementwise_unary),
+ nullptr,
},
{
"sve_fp16_elementwise_unary",
@@ -59,6 +127,7 @@ static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> avai
return (data.dt == DataType::F16 && data.isa.sve && data.isa.fp16);
},
REGISTER_FP16_SVE(sve_fp16_elementwise_unary),
+ nullptr,
},
{
"sve_s32_elementwise_unary",
@@ -67,6 +136,7 @@ static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> avai
return (data.dt == DataType::S32 && data.isa.sve);
},
REGISTER_INTEGER_SVE(sve_s32_elementwise_unary),
+ nullptr,
},
{
"neon_fp32_elementwise_unary",
@@ -75,6 +145,7 @@ static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> avai
return data.dt == DataType::F32;
},
REGISTER_FP32_NEON(neon_fp32_elementwise_unary),
+ nullptr,
},
{
"neon_fp16_elementwise_unary",
@@ -83,6 +154,7 @@ static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> avai
return data.dt == DataType::F16 && data.isa.fp16;
},
REGISTER_FP16_NEON(neon_fp16_elementwise_unary),
+ nullptr,
},
{
"neon_s32_elementwise_unary",
@@ -91,7 +163,28 @@ static const std::vector<CpuElementwiseUnaryKernel::ElementwiseUnaryKernel> avai
return data.dt == DataType::S32;
},
REGISTER_INTEGER_NEON(neon_s32_elementwise_unary),
+ nullptr,
},
+#ifdef __aarch64__
+ {
+ "sve_q8_elementwise_unary",
+ [](const DataTypeISASelectorData & data)
+ {
+ return (data.dt == DataType::QASYMM8 || data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve;
+ },
+ REGISTER_QASYMM8_SVE(sve_q8_elementwise_unary),
+ &q8_prepare_lut,
+ },
+ {
+ "neon_q8_elementwise_unary",
+ [](const DataTypeISASelectorData & data)
+ {
+ return data.dt == DataType::QASYMM8 || data.dt == DataType::QASYMM8_SIGNED;
+ },
+ REGISTER_QASYMM8_NEON(neon_q8_elementwise_unary),
+ &q8_prepare_lut,
+ },
+#endif // __aarch64__
};
} // namespace
@@ -112,6 +205,11 @@ void CpuElementwiseUnaryKernel::configure(ElementWiseUnary op, const ITensorInfo
return;
}
+ if(uk->prepare_func != nullptr)
+ {
+ _lut = uk->prepare_func(op, &src, &dst);
+ }
+
auto shape_and_window = compute_output_shape_and_window(src.tensor_shape());
auto_init_if_empty(dst, shape_and_window.first, 1, src.data_type());
ICpuKernel::configure(shape_and_window.second);
@@ -132,11 +230,11 @@ Status CpuElementwiseUnaryKernel::validate(ElementWiseUnary op, const ITensorInf
case ElementWiseUnary::LOG:
case ElementWiseUnary::ROUND:
case ElementWiseUnary::SIN:
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
break;
case ElementWiseUnary::NEG:
case ElementWiseUnary::ABS:
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::F16, DataType::F32, DataType::S32, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
break;
default:
ARM_COMPUTE_ERROR("ElementWiseUnary operation not supported");
@@ -157,7 +255,7 @@ void CpuElementwiseUnaryKernel::run_op(ITensorPack &tensors, const Window &windo
auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
auto dst = tensors.get_tensor(TensorType::ACL_DST);
- _run_method(src, dst, window, _op);
+ _run_method(src, dst, window, _op, _lut.get());
}
const char *CpuElementwiseUnaryKernel::name() const
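
Working one table entry through q8_prepare_lut makes the clamp step concrete. With hypothetical quantization parameters — src scale 0.1, offset 0; signed dst scale 0.25, offset 0 — the EXP entry for input byte i = 40 is:

    in        = (40 - 0) * 0.1                          = 4.0
    result    = exp(4.0)                                ≈ 54.598
    dst range = [(-128 - 0) * 0.25, (127 - 0) * 0.25]   = [-32.0, 31.75]
    clamped   = 31.75
    lut[40]   = quantize(31.75) = round(31.75 / 0.25)   = 127

The clamp to [dst_min_fp, dst_max_fp] keeps out-of-range and non-finite results (e.g. log(0) = -inf) inside the representable output interval before requantizing.
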
diff --git a/src/cpu/kernels/CpuElementwiseUnaryKernel.h b/src/cpu/kernels/CpuElementwiseUnaryKernel.h
index 138049a60c..00188f0d49 100644
--- a/src/cpu/kernels/CpuElementwiseUnaryKernel.h
+++ b/src/cpu/kernels/CpuElementwiseUnaryKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,8 @@ namespace kernels
class CpuElementwiseUnaryKernel : public ICpuKernel<CpuElementwiseUnaryKernel>
{
private:
- using ElementwiseUnaryUkernelPtr = std::add_pointer<void(const ITensor *, ITensor *, const Window &, ElementWiseUnary)>::type;
+ using ElementwiseUnaryUkernelPtr = std::add_pointer<void(const ITensor *, ITensor *, const Window &, ElementWiseUnary, const uint8_t *)>::type;
+ using ElementwiseUnaryPreparePtr = std::add_pointer<std::unique_ptr<uint8_t[]>(ElementWiseUnary op, const ITensorInfo *, const ITensorInfo *)>::type;
public:
CpuElementwiseUnaryKernel() = default;
@@ -72,6 +73,7 @@ public:
const char *name;
const DataTypeISASelectorPtr is_selected;
ElementwiseUnaryUkernelPtr ukernel;
+ ElementwiseUnaryPreparePtr prepare_func;
};
static const std::vector<ElementwiseUnaryKernel> &get_available_kernels();
@@ -80,6 +82,7 @@ private:
ElementWiseUnary _op{};
ElementwiseUnaryUkernelPtr _run_method{ nullptr };
std::string _name{};
+ std::unique_ptr<uint8_t[]> _lut{};
};
} // namespace kernels
} // namespace cpu
diff --git a/src/cpu/kernels/activation/generic/neon/lut.cpp b/src/cpu/kernels/activation/generic/neon/lut.cpp
index 8ceb7d8cbc..90690ffcaa 100644
--- a/src/cpu/kernels/activation/generic/neon/lut.cpp
+++ b/src/cpu/kernels/activation/generic/neon/lut.cpp
@@ -23,394 +23,12 @@
*/
#include "arm_compute/core/Helpers.h"
-
-#include <arm_neon.h>
-#include <cstdint>
+#include "src/cpu/kernels/lut/list.h"
namespace arm_compute
{
namespace cpu
{
-namespace
-{
-#ifdef __aarch64__
-
-void substitute_bytes_neon(
- const uint8_t *table,
- size_t num_strings,
- size_t string_length,
- const uint8_t *const *input,
- uint8_t *const *output)
-{
- __asm__ __volatile__(
- "ldr q16, [%x[table], #0x0]\n"
- "ldr q17, [%x[table], #0x10]\n"
- "mov x23, #0x0\n"
- "ldr q18, [%x[table], #0x20]\n"
- "ldr q19, [%x[table], #0x30]\n"
- "ldr q20, [%x[table], #0x40]\n"
- "ldr q21, [%x[table], #0x50]\n"
- "ldr q22, [%x[table], #0x60]\n"
- "ldr q23, [%x[table], #0x70]\n"
- "ldr q24, [%x[table], #0x80]\n"
- "ldr q25, [%x[table], #0x90]\n"
- "ldr q26, [%x[table], #0xa0]\n"
- "ldr q27, [%x[table], #0xb0]\n"
- "ldr q28, [%x[table], #0xc0]\n"
- "ldr q29, [%x[table], #0xd0]\n"
- "ldr q30, [%x[table], #0xe0]\n"
- "ldr q31, [%x[table], #0xf0]\n"
- "1:" // string loop
- "ldr x22, [%x[input], x23, LSL #0x3]\n"
- "ldr x21, [%x[output], x23, LSL #0x3]\n"
- "movi v11.16b, #0x40\n"
- "movi v10.16b, #0x80\n"
- "movi v9.16b, #0xc0\n"
- "mov x20, %x[string_length]\n"
- "2:" // 4 rounds: width loop
- "cmp x20, #0x30\n"
- "bge 27f\n"
- "tbz x20, #5, 10f\n"
- "ld1 { v8.16b }, [x22], #0x10\n"
- "ld1 { v13.16b }, [x22], #0x10\n"
- "tbz x20, #3, 6f\n"
- "ldr d12, [x22], #0x8\n"
- "tbz x20, #2, 4f\n"
- "ld1 { v12.s }[2], [x22], #0x4\n"
- "tbz x20, #1, 3f\n"
- "ld1 { v12.h }[6], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[14], [x22]\n"
- "b 26f\n"
- "3:" // 4 rounds: Partial load: partial_1_44
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[12], [x22]\n"
- "b 26f\n"
- "4:" // 4 rounds: Partial load: partial_2_40
- "tbz x20, #1, 5f\n"
- "ld1 { v12.h }[4], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[10], [x22]\n"
- "b 26f\n"
- "5:" // 4 rounds: Partial load: partial_1_40
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[8], [x22]\n"
- "b 26f\n"
- "6:" // 4 rounds: Partial load: partial_4_32
- "tbz x20, #2, 8f\n"
- "ldr s12, [x22], #0x4\n"
- "tbz x20, #1, 7f\n"
- "ld1 { v12.h }[2], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[6], [x22]\n"
- "b 26f\n"
- "7:" // 4 rounds: Partial load: partial_1_36
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[4], [x22]\n"
- "b 26f\n"
- "8:" // 4 rounds: Partial load: partial_2_32
- "tbz x20, #1, 9f\n"
- "ldr h12, [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v12.b }[2], [x22]\n"
- "b 26f\n"
- "9:" // 4 rounds: Partial load: partial_1_32
- "tbz x20, #0, 26f\n"
- "ldr b12, [x22, #0x0]\n"
- "b 26f\n"
- "10:" // 4 rounds: Partial load: partial_16_0
- "tbz x20, #4, 18f\n"
- "ld1 { v8.16b }, [x22], #0x10\n"
- "tbz x20, #3, 14f\n"
- "ldr d13, [x22], #0x8\n"
- "tbz x20, #2, 12f\n"
- "ld1 { v13.s }[2], [x22], #0x4\n"
- "tbz x20, #1, 11f\n"
- "ld1 { v13.h }[6], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[14], [x22]\n"
- "b 26f\n"
- "11:" // 4 rounds: Partial load: partial_1_28
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[12], [x22]\n"
- "b 26f\n"
- "12:" // 4 rounds: Partial load: partial_2_24
- "tbz x20, #1, 13f\n"
- "ld1 { v13.h }[4], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[10], [x22]\n"
- "b 26f\n"
- "13:" // 4 rounds: Partial load: partial_1_24
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[8], [x22]\n"
- "b 26f\n"
- "14:" // 4 rounds: Partial load: partial_4_16
- "tbz x20, #2, 16f\n"
- "ldr s13, [x22], #0x4\n"
- "tbz x20, #1, 15f\n"
- "ld1 { v13.h }[2], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[6], [x22]\n"
- "b 26f\n"
- "15:" // 4 rounds: Partial load: partial_1_20
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[4], [x22]\n"
- "b 26f\n"
- "16:" // 4 rounds: Partial load: partial_2_16
- "tbz x20, #1, 17f\n"
- "ldr h13, [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v13.b }[2], [x22]\n"
- "b 26f\n"
- "17:" // 4 rounds: Partial load: partial_1_16
- "tbz x20, #0, 26f\n"
- "ldr b13, [x22, #0x0]\n"
- "b 26f\n"
- "18:" // 4 rounds: Partial load: partial_8_0
- "tbz x20, #3, 22f\n"
- "ldr d8, [x22], #0x8\n"
- "tbz x20, #2, 20f\n"
- "ld1 { v8.s }[2], [x22], #0x4\n"
- "tbz x20, #1, 19f\n"
- "ld1 { v8.h }[6], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[14], [x22]\n"
- "b 26f\n"
- "19:" // 4 rounds: Partial load: partial_1_12
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[12], [x22]\n"
- "b 26f\n"
- "20:" // 4 rounds: Partial load: partial_2_8
- "tbz x20, #1, 21f\n"
- "ld1 { v8.h }[4], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[10], [x22]\n"
- "b 26f\n"
- "21:" // 4 rounds: Partial load: partial_1_8
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[8], [x22]\n"
- "b 26f\n"
- "22:" // 4 rounds: Partial load: partial_4_0
- "tbz x20, #2, 24f\n"
- "ldr s8, [x22], #0x4\n"
- "tbz x20, #1, 23f\n"
- "ld1 { v8.h }[2], [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[6], [x22]\n"
- "b 26f\n"
- "23:" // 4 rounds: Partial load: partial_1_4
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[4], [x22]\n"
- "b 26f\n"
- "24:" // 4 rounds: Partial load: partial_2_0
- "tbz x20, #1, 25f\n"
- "ldr h8, [x22], #0x2\n"
- "tbz x20, #0, 26f\n"
- "ld1 { v8.b }[2], [x22]\n"
- "b 26f\n"
- "25:" // 4 rounds: Partial load: partial_1_0
- "ldr b8, [x22, #0x0]\n"
- "26:" // 4 rounds: Partial load: Done
- "b 28f\n"
- "27:" // 4 rounds: Full load
- "ldr q8, [x22, #0x0]\n"
- "ldr q13, [x22, #0x10]\n"
- "ldr q12, [x22, #0x20]\n"
- "add x22, x22, #0x30\n"
- "28:" // 4 rounds: Load done
- "sub v0.16b, v8.16b, v11.16b\n"
- "sub v7.16b, v8.16b, v10.16b\n"
- "tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b\n"
- "sub v6.16b, v8.16b, v9.16b\n"
- "sub v5.16b, v13.16b, v11.16b\n"
- "tbl v8.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v8.16b\n"
- "sub v4.16b, v13.16b, v10.16b\n"
- "sub v3.16b, v13.16b, v9.16b\n"
- "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
- "sub v2.16b, v12.16b, v11.16b\n"
- "sub v1.16b, v12.16b, v10.16b\n"
- "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
- "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
- "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
- "orr v8.16b, v8.16b, v0.16b\n"
- "sub v0.16b, v12.16b, v9.16b\n"
- "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
- "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
- "tbl v12.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v12.16b\n"
- "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
- "orr v7.16b, v7.16b, v6.16b\n"
- "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
- "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
- "orr v13.16b, v13.16b, v5.16b\n"
- "orr v4.16b, v4.16b, v3.16b\n"
- "orr v12.16b, v12.16b, v2.16b\n"
- "cmp x20, #0x30\n"
- "orr v1.16b, v1.16b, v0.16b\n"
- "orr v8.16b, v8.16b, v7.16b\n"
- "orr v13.16b, v13.16b, v4.16b\n"
- "orr v12.16b, v12.16b, v1.16b\n"
- "bge 53f\n"
- "tbz x20, #5, 36f\n"
- "st1 { v8.16b }, [x21], #0x10\n"
- "st1 { v13.16b }, [x21], #0x10\n"
- "tbz x20, #3, 32f\n"
- "str d12, [x21], #0x8\n"
- "tbz x20, #2, 30f\n"
- "st1 { v12.s }[2], [x21], #0x4\n"
- "tbz x20, #1, 29f\n"
- "st1 { v12.h }[6], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[14], [x21]\n"
- "b 52f\n"
- "29:" // 4 rounds: Partial writeback: partial_1_44
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[12], [x21]\n"
- "b 52f\n"
- "30:" // 4 rounds: Partial writeback: partial_2_40
- "tbz x20, #1, 31f\n"
- "st1 { v12.h }[4], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[10], [x21]\n"
- "b 52f\n"
- "31:" // 4 rounds: Partial writeback: partial_1_40
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[8], [x21]\n"
- "b 52f\n"
- "32:" // 4 rounds: Partial writeback: partial_4_32
- "tbz x20, #2, 34f\n"
- "str s12, [x21], #0x4\n"
- "tbz x20, #1, 33f\n"
- "st1 { v12.h }[2], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[6], [x21]\n"
- "b 52f\n"
- "33:" // 4 rounds: Partial writeback: partial_1_36
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[4], [x21]\n"
- "b 52f\n"
- "34:" // 4 rounds: Partial writeback: partial_2_32
- "tbz x20, #1, 35f\n"
- "str h12, [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v12.b }[2], [x21]\n"
- "b 52f\n"
- "35:" // 4 rounds: Partial writeback: partial_1_32
- "tbz x20, #0, 52f\n"
- "str b12, [x21, #0x0]\n"
- "b 52f\n"
- "36:" // 4 rounds: Partial writeback: partial_16_0
- "tbz x20, #4, 44f\n"
- "st1 { v8.16b }, [x21], #0x10\n"
- "tbz x20, #3, 40f\n"
- "str d13, [x21], #0x8\n"
- "tbz x20, #2, 38f\n"
- "st1 { v13.s }[2], [x21], #0x4\n"
- "tbz x20, #1, 37f\n"
- "st1 { v13.h }[6], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[14], [x21]\n"
- "b 52f\n"
- "37:" // 4 rounds: Partial writeback: partial_1_28
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[12], [x21]\n"
- "b 52f\n"
- "38:" // 4 rounds: Partial writeback: partial_2_24
- "tbz x20, #1, 39f\n"
- "st1 { v13.h }[4], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[10], [x21]\n"
- "b 52f\n"
- "39:" // 4 rounds: Partial writeback: partial_1_24
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[8], [x21]\n"
- "b 52f\n"
- "40:" // 4 rounds: Partial writeback: partial_4_16
- "tbz x20, #2, 42f\n"
- "str s13, [x21], #0x4\n"
- "tbz x20, #1, 41f\n"
- "st1 { v13.h }[2], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[6], [x21]\n"
- "b 52f\n"
- "41:" // 4 rounds: Partial writeback: partial_1_20
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[4], [x21]\n"
- "b 52f\n"
- "42:" // 4 rounds: Partial writeback: partial_2_16
- "tbz x20, #1, 43f\n"
- "str h13, [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v13.b }[2], [x21]\n"
- "b 52f\n"
- "43:" // 4 rounds: Partial writeback: partial_1_16
- "tbz x20, #0, 52f\n"
- "str b13, [x21, #0x0]\n"
- "b 52f\n"
- "44:" // 4 rounds: Partial writeback: partial_8_0
- "tbz x20, #3, 48f\n"
- "str d8, [x21], #0x8\n"
- "tbz x20, #2, 46f\n"
- "st1 { v8.s }[2], [x21], #0x4\n"
- "tbz x20, #1, 45f\n"
- "st1 { v8.h }[6], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[14], [x21]\n"
- "b 52f\n"
- "45:" // 4 rounds: Partial writeback: partial_1_12
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[12], [x21]\n"
- "b 52f\n"
- "46:" // 4 rounds: Partial writeback: partial_2_8
- "tbz x20, #1, 47f\n"
- "st1 { v8.h }[4], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[10], [x21]\n"
- "b 52f\n"
- "47:" // 4 rounds: Partial writeback: partial_1_8
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[8], [x21]\n"
- "b 52f\n"
- "48:" // 4 rounds: Partial writeback: partial_4_0
- "tbz x20, #2, 50f\n"
- "str s8, [x21], #0x4\n"
- "tbz x20, #1, 49f\n"
- "st1 { v8.h }[2], [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[6], [x21]\n"
- "b 52f\n"
- "49:" // 4 rounds: Partial writeback: partial_1_4
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[4], [x21]\n"
- "b 52f\n"
- "50:" // 4 rounds: Partial writeback: partial_2_0
- "tbz x20, #1, 51f\n"
- "str h8, [x21], #0x2\n"
- "tbz x20, #0, 52f\n"
- "st1 { v8.b }[2], [x21]\n"
- "b 52f\n"
- "51:" // 4 rounds: Partial writeback: partial_1_0
- "str b8, [x21, #0x0]\n"
- "52:" // 4 rounds: Partial writeback: Done
- "b 54f\n"
- "53:" // 4 rounds: Full writeback
- "str q8, [x21, #0x0]\n"
- "str q13, [x21, #0x10]\n"
- "str q12, [x21, #0x20]\n"
- "add x21, x21, #0x30\n"
- "54:" // 4 rounds: Writeback done
- "subs x20, x20, #0x30\n"
- "bgt 2b\n"
- "add x23, x23, #0x1\n"
- "cmp x23, %x[num_strings]\n"
- "bne 1b\n"
- :
- : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23");
-}
-
-#endif // __aarch64__
-} // namespace
-
#ifdef __aarch64__
void neon_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
@@ -424,7 +42,7 @@ void neon_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLa
{
const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
- substitute_bytes_neon(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
+ lut_u8_neon(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
},
input, output);
}
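
The removed inline assembly now lives in src/cpu/kernels/lut/generic/neon/u8.cpp as the shared lut_u8_neon, so the activation and elementwise-unary kernels use a single implementation. Semantically it is a plain byte-wise table substitution; the NEON version processes 48 bytes per iteration using TBL/TBX across the four 64-byte quarters of the table. A scalar reference of what it computes, keeping the same signature:

    #include <cstddef>
    #include <cstdint>

    // Reference semantics of lut_u8_neon: every output byte is a 256-entry lookup.
    void lut_u8_scalar(const uint8_t *table,
                       size_t num_strings,
                       size_t string_length,
                       const uint8_t *const *input,
                       uint8_t *const *output)
    {
        for(size_t s = 0; s < num_strings; ++s)
        {
            for(size_t i = 0; i < string_length; ++i)
            {
                output[s][i] = table[input[s][i]];
            }
        }
    }
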
diff --git a/src/cpu/kernels/activation/generic/sve/lut.cpp b/src/cpu/kernels/activation/generic/sve/lut.cpp
index b73c87e319..b4042662b9 100644
--- a/src/cpu/kernels/activation/generic/sve/lut.cpp
+++ b/src/cpu/kernels/activation/generic/sve/lut.cpp
@@ -23,627 +23,12 @@
*/
#include "arm_compute/core/Helpers.h"
-
-#include <arm_neon.h>
-#include <cstdint>
+#include "src/cpu/kernels/lut/list.h"
namespace arm_compute
{
namespace cpu
{
-namespace
-{
-#ifdef __aarch64__
-void substitute_bytes_sve(
- const uint8_t *table,
- size_t num_strings,
- size_t string_length,
- const uint8_t *const *input,
- uint8_t *const *output)
-{
- __asm__ __volatile__(
- "ptrue p0.b\n"
- "cntd x25\n"
- "addvl %x[table], %x[table], #8\n"
- "ld1b { z16.b }, p0/Z, [%x[table], #-8, MUL VL]\n"
- "tbnz x25, #5, 1f\n"
- "ld1b { z17.b }, p0/Z, [%x[table], #-7, MUL VL]\n"
- "tbnz x25, #4, 1f\n"
- "ld1b { z18.b }, p0/Z, [%x[table], #-6, MUL VL]\n"
- "ld1b { z19.b }, p0/Z, [%x[table], #-5, MUL VL]\n"
- "tbnz x25, #3, 1f\n"
- "ld1b { z20.b }, p0/Z, [%x[table], #-4, MUL VL]\n"
- "ld1b { z21.b }, p0/Z, [%x[table], #-3, MUL VL]\n"
- "ld1b { z22.b }, p0/Z, [%x[table], #-2, MUL VL]\n"
- "ld1b { z23.b }, p0/Z, [%x[table], #-1, MUL VL]\n"
- "tbnz x25, #2, 1f\n"
- "ld1b { z24.b }, p0/Z, [%x[table]]\n"
- "ld1b { z25.b }, p0/Z, [%x[table], #1, MUL VL]\n"
- "ld1b { z26.b }, p0/Z, [%x[table], #2, MUL VL]\n"
- "ld1b { z27.b }, p0/Z, [%x[table], #3, MUL VL]\n"
- "ld1b { z28.b }, p0/Z, [%x[table], #4, MUL VL]\n"
- "ld1b { z29.b }, p0/Z, [%x[table], #5, MUL VL]\n"
- "ld1b { z30.b }, p0/Z, [%x[table], #6, MUL VL]\n"
- "ld1b { z31.b }, p0/Z, [%x[table], #7, MUL VL]\n"
- "1:" // Table load done
- "mov x24, #0x0\n"
- "2:" // string loop
- "ldr x23, [%x[input], x24, LSL #0x3]\n"
- "ldr x22, [%x[output], x24, LSL #0x3]\n"
- "tbnz x25, #5, 14f\n"
- "tbnz x25, #4, 11f\n"
- "tbnz x25, #3, 8f\n"
- "tbnz x25, #2, 5f\n"
- "mov z12.b, #0x10\n"
- "mov x21, %x[string_length]\n"
- "ptrue p5.b\n"
- "ptrue p4.b\n"
- "ptrue p3.b\n"
- "ptrue p2.b\n"
- "ptrue p1.b\n"
- "ptrue p0.b\n"
- "3:" // 16 rounds: width loop
- "addvl x20, x21, #-6\n"
- "cmp x20, XZR\n"
- "bge 4f\n"
- "mov x20, #0x0\n"
- "addvl x20, x20, #1\n"
- "whilelt p5.b, XZR, x21\n"
- "whilelt p4.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p3.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p2.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p1.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p0.b, x20, x21\n"
- "4:" // 16 rounds: predicate OK
- "ld1b { z11.b }, p5/Z, [x23]\n"
- "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
- "tbl z9.b, { z16.b }, z11.b\n"
- "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
- "sub z11.b, z11.b, z12.b\n"
- "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
- "tbl z4.b, { z16.b }, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- "tbl z3.b, { z16.b }, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- "tbl z2.b, { z16.b }, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- "tbl z1.b, { z16.b }, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- "tbl z0.b, { z16.b }, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2f09 // tbx z9.b, z24.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2f04 // tbx z4.b, z24.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282f03 // tbx z3.b, z24.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272f02 // tbx z2.b, z24.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262f01 // tbx z1.b, z24.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252f00 // tbx z0.b, z24.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2f29 // tbx z9.b, z25.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2f24 // tbx z4.b, z25.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282f23 // tbx z3.b, z25.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272f22 // tbx z2.b, z25.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262f21 // tbx z1.b, z25.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252f20 // tbx z0.b, z25.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2f49 // tbx z9.b, z26.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2f44 // tbx z4.b, z26.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282f43 // tbx z3.b, z26.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272f42 // tbx z2.b, z26.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262f41 // tbx z1.b, z26.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252f40 // tbx z0.b, z26.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2f69 // tbx z9.b, z27.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2f64 // tbx z4.b, z27.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282f63 // tbx z3.b, z27.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272f62 // tbx z2.b, z27.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262f61 // tbx z1.b, z27.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252f60 // tbx z0.b, z27.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2f89 // tbx z9.b, z28.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2f84 // tbx z4.b, z28.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282f83 // tbx z3.b, z28.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272f82 // tbx z2.b, z28.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262f81 // tbx z1.b, z28.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252f80 // tbx z0.b, z28.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2fa9 // tbx z9.b, z29.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2fa4 // tbx z4.b, z29.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282fa3 // tbx z3.b, z29.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272fa2 // tbx z2.b, z29.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262fa1 // tbx z1.b, z29.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252fa0 // tbx z0.b, z29.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "addvl x21, x21, #-6\n"
- ".inst 0x052b2fc9 // tbx z9.b, z30.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2fc4 // tbx z4.b, z30.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282fc3 // tbx z3.b, z30.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272fc2 // tbx z2.b, z30.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262fc1 // tbx z1.b, z30.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252fc0 // tbx z0.b, z30.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "cmp x21, XZR\n"
- ".inst 0x052b2fe9 // tbx z9.b, z31.b, z11.b\n"
- ".inst 0x052a2fe4 // tbx z4.b, z31.b, z10.b\n"
- ".inst 0x05282fe3 // tbx z3.b, z31.b, z8.b\n"
- "st1b { z9.b }, p5, [x22]\n"
- ".inst 0x05272fe2 // tbx z2.b, z31.b, z7.b\n"
- ".inst 0x05262fe1 // tbx z1.b, z31.b, z6.b\n"
- "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
- ".inst 0x05252fe0 // tbx z0.b, z31.b, z5.b\n"
- "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
- "addvl x23, x23, #6\n"
- "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
- "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
- "bgt 3b\n"
- "b 17f\n"
- "5:" // 256 bits
- "mov z12.b, #0x20\n"
- "mov x21, %x[string_length]\n"
- "ptrue p5.b\n"
- "ptrue p4.b\n"
- "ptrue p3.b\n"
- "ptrue p2.b\n"
- "ptrue p1.b\n"
- "ptrue p0.b\n"
- "6:" // 8 rounds: width loop
- "addvl x20, x21, #-6\n"
- "cmp x20, XZR\n"
- "bge 7f\n"
- "mov x20, #0x0\n"
- "addvl x20, x20, #1\n"
- "whilelt p5.b, XZR, x21\n"
- "whilelt p4.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p3.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p2.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p1.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p0.b, x20, x21\n"
- "7:" // 8 rounds: predicate OK
- "ld1b { z11.b }, p5/Z, [x23]\n"
- "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
- "tbl z9.b, { z16.b }, z11.b\n"
- "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
- "sub z11.b, z11.b, z12.b\n"
- "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
- "tbl z4.b, { z16.b }, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- "tbl z3.b, { z16.b }, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- "tbl z2.b, { z16.b }, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- "tbl z1.b, { z16.b }, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- "tbl z0.b, { z16.b }, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "addvl x21, x21, #-6\n"
- ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "cmp x21, XZR\n"
- ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
- ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
- ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
- "st1b { z9.b }, p5, [x22]\n"
- ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
- ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
- "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
- ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
- "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
- "addvl x23, x23, #6\n"
- "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
- "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
- "bgt 6b\n"
- "b 17f\n"
- "8:" // 512 bits
- "mov z12.b, #0x40\n"
- "mov x21, %x[string_length]\n"
- "ptrue p5.b\n"
- "ptrue p4.b\n"
- "ptrue p3.b\n"
- "ptrue p2.b\n"
- "ptrue p1.b\n"
- "ptrue p0.b\n"
- "9:" // 4 rounds: width loop
- "addvl x20, x21, #-6\n"
- "cmp x20, XZR\n"
- "bge 10f\n"
- "mov x20, #0x0\n"
- "addvl x20, x20, #1\n"
- "whilelt p5.b, XZR, x21\n"
- "whilelt p4.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p3.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p2.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p1.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p0.b, x20, x21\n"
- "10:" // 4 rounds: predicate OK
- "ld1b { z11.b }, p5/Z, [x23]\n"
- "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
- "tbl z9.b, { z16.b }, z11.b\n"
- "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
- "sub z11.b, z11.b, z12.b\n"
- "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
- "tbl z4.b, { z16.b }, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- "tbl z3.b, { z16.b }, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- "tbl z2.b, { z16.b }, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- "tbl z1.b, { z16.b }, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- "tbl z0.b, { z16.b }, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "addvl x21, x21, #-6\n"
- ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
- "sub z11.b, z11.b, z12.b\n"
- ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "cmp x21, XZR\n"
- ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
- ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
- ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
- "st1b { z9.b }, p5, [x22]\n"
- ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
- ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
- "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
- ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
- "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
- "addvl x23, x23, #6\n"
- "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
- "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
- "bgt 9b\n"
- "b 17f\n"
- "11:" // 1024 bits
- "mov z12.b, #0x80\n"
- "mov x21, %x[string_length]\n"
- "ptrue p5.b\n"
- "ptrue p4.b\n"
- "ptrue p3.b\n"
- "ptrue p2.b\n"
- "ptrue p1.b\n"
- "ptrue p0.b\n"
- "12:" // 2 rounds: width loop
- "addvl x20, x21, #-6\n"
- "cmp x20, XZR\n"
- "bge 13f\n"
- "mov x20, #0x0\n"
- "addvl x20, x20, #1\n"
- "whilelt p5.b, XZR, x21\n"
- "whilelt p4.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p3.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p2.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p1.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p0.b, x20, x21\n"
- "13:" // 2 rounds: predicate OK
- "ld1b { z11.b }, p5/Z, [x23]\n"
- "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
- "addvl x21, x21, #-6\n"
- "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
- "tbl z9.b, { z16.b }, z11.b\n"
- "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
- "sub z11.b, z11.b, z12.b\n"
- "tbl z4.b, { z16.b }, z10.b\n"
- "sub z10.b, z10.b, z12.b\n"
- "tbl z3.b, { z16.b }, z8.b\n"
- "sub z8.b, z8.b, z12.b\n"
- "tbl z2.b, { z16.b }, z7.b\n"
- "sub z7.b, z7.b, z12.b\n"
- "tbl z1.b, { z16.b }, z6.b\n"
- "sub z6.b, z6.b, z12.b\n"
- "tbl z0.b, { z16.b }, z5.b\n"
- "sub z5.b, z5.b, z12.b\n"
- "cmp x21, XZR\n"
- ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
- ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
- ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
- "st1b { z9.b }, p5, [x22]\n"
- ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
- ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
- "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
- ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
- "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
- "addvl x23, x23, #6\n"
- "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
- "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
- "bgt 12b\n"
- "b 17f\n"
- "14:" // 2048 bits
- "mov x21, %x[string_length]\n"
- "ptrue p5.b\n"
- "ptrue p4.b\n"
- "ptrue p3.b\n"
- "ptrue p2.b\n"
- "ptrue p1.b\n"
- "ptrue p0.b\n"
- "15:" // 1 rounds: width loop
- "addvl x20, x21, #-6\n"
- "cmp x20, XZR\n"
- "bge 16f\n"
- "mov x20, #0x0\n"
- "addvl x20, x20, #1\n"
- "whilelt p5.b, XZR, x21\n"
- "whilelt p4.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p3.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p2.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p1.b, x20, x21\n"
- "addvl x20, x20, #1\n"
- "whilelt p0.b, x20, x21\n"
- "16:" // 1 rounds: predicate OK
- "addvl x21, x21, #-6\n"
- "ld1b { z11.b }, p5/Z, [x23]\n"
- "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
- "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
- "cmp x21, XZR\n"
- "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
- "tbl z9.b, { z16.b }, z11.b\n"
- "tbl z4.b, { z16.b }, z10.b\n"
- "tbl z3.b, { z16.b }, z8.b\n"
- "st1b { z9.b }, p5, [x22]\n"
- "tbl z2.b, { z16.b }, z7.b\n"
- "tbl z1.b, { z16.b }, z6.b\n"
- "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
- "tbl z0.b, { z16.b }, z5.b\n"
- "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
- "addvl x23, x23, #6\n"
- "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
- "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
- "addvl x22, x22, #6\n"
- "bgt 15b\n"
- "17:" // SVE body done
- "add x24, x24, #0x1\n"
- "cmp x24, %x[num_strings]\n"
- "bne 2b\n"
- : [table] "+&r"(table)
- : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31");
-}
-#endif // __aarch64__
-} // namespace
-
#ifdef __aarch64__
void sve_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
@@ -657,7 +42,7 @@ void sve_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLay
{
const auto input_ptr = input.ptr();
auto output_ptr = output.ptr();
- substitute_bytes_sve(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
+ lut_u8_sve(act_info.lut().data(), 1u, window_end_x, &input_ptr, &output_ptr);
},
input, output);
}
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp
index 976d006f11..b2833c2481 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/fp16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,8 +29,9 @@ namespace arm_compute
{
namespace cpu
{
-void neon_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void neon_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
{
+ ARM_COMPUTE_UNUSED(lut);
return elementwise_op<__fp16>(in, out, window, op);
}
}
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
index 21f4d9d326..6566821eca 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/fp32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,8 +28,9 @@ namespace arm_compute
{
namespace cpu
{
-void neon_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void neon_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
{
+ ARM_COMPUTE_UNUSED(lut);
return elementwise_op<float>(in, out, window, op);
}
}
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp
index ef3120e206..dfe5e30035 100644
--- a/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/integer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,8 +28,9 @@ namespace arm_compute
{
namespace cpu
{
-void neon_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void neon_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
{
+ ARM_COMPUTE_UNUSED(lut);
return elementwise_op<int32_t>(in, out, window, op);
}
}
diff --git a/src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp b/src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp
new file mode 100644
index 0000000000..08bb7f28b6
--- /dev/null
+++ b/src/cpu/kernels/elementwise_unary/generic/neon/q8.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+
+void neon_q8_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
+{
+ ARM_COMPUTE_UNUSED(op);
+
+ auto win = window;
+ const auto window_end_x = window.x().end();
+ win.set(0, Window::Dimension(0, 1, 1));
+
+ Iterator src_it(in, win);
+ Iterator dst_it(out, win);
+
+ execute_window_loop(win, [&](const Coordinates &) {
+ const auto src_ptr = src_it.ptr();
+ auto dst_ptr = dst_it.ptr();
+
+ lut_u8_neon(lut, 1, window_end_x, &src_ptr, &dst_ptr);
+ },
+ src_it, dst_it);
+}
+
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
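
The new q8 kernel ignores `op` at run time: the operator has already been baked into a 256-entry table (one output byte per possible input byte), and the window is collapsed along dimension 0 so each row is handed to lut_u8_neon as one contiguous run. Below is a minimal sketch of how such a table could be precomputed at configure time; the function name and the exact rounding/saturation behaviour are assumptions for illustration, not the library's actual code.

    #include <algorithm>
    #include <array>
    #include <cmath>
    #include <cstdint>

    // Hypothetical sketch: precompute a QASYMM8 LUT for Exp. Every possible
    // input byte is dequantized, transformed, and requantized once, so the
    // per-element work at run time is a pure table lookup.
    std::array<uint8_t, 256> build_exp_lut_qasymm8(float in_scale, int32_t in_offset,
                                                   float out_scale, int32_t out_offset)
    {
        std::array<uint8_t, 256> lut{};
        for(int i = 0; i < 256; ++i)
        {
            const float   x = in_scale * static_cast<float>(i - in_offset); // dequantize
            const float   y = std::exp(x);                                  // apply the op
            const int32_t q = static_cast<int32_t>(std::lround(y / out_scale)) + out_offset;
            lut[i]          = static_cast<uint8_t>(std::clamp(q, 0, 255));  // saturate to u8
        }
        return lut;
    }
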
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp
index ba29b3d855..01567a7852 100644
--- a/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/fp16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,10 +29,11 @@ namespace arm_compute
{
namespace cpu
{
-void sve_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void sve_fp16_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
{
+ ARM_COMPUTE_UNUSED(lut);
return elementwise_sve_op<float16_t>(in, out, window, op);
}
}
} // namespace arm_compute
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
\ No newline at end of file
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
index c5222c5e4e..47645ff80f 100644
--- a/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/fp32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,8 +29,9 @@ namespace arm_compute
{
namespace cpu
{
-void sve_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void sve_fp32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
{
+ ARM_COMPUTE_UNUSED(lut);
return elementwise_sve_op<float32_t>(in, out, window, op);
}
}
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp
index 984056a426..068c3f7cda 100644
--- a/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/integer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,8 +29,9 @@ namespace arm_compute
{
namespace cpu
{
-void sve_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+void sve_s32_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
{
+ ARM_COMPUTE_UNUSED(lut);
return elementwise_sve_op<int32_t>(in, out, window, op);
}
}
diff --git a/src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp b/src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp
new file mode 100644
index 0000000000..b68f691086
--- /dev/null
+++ b/src/cpu/kernels/elementwise_unary/generic/sve/q8.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+void sve_q8_elementwise_unary(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
+{
+ ARM_COMPUTE_UNUSED(op);
+
+ auto win = window;
+ const auto window_end_x = window.x().end();
+ win.set(0, Window::Dimension(0, 1, 1));
+
+ Iterator src_it(in, win);
+ Iterator dst_it(out, win);
+
+ execute_window_loop(win, [&](const Coordinates &) {
+ const auto src_ptr = src_it.ptr();
+ auto dst_ptr = dst_it.ptr();
+
+ lut_u8_sve(lut, 1, window_end_x, &src_ptr, &dst_ptr);
+ },
+ src_it, dst_it);
+}
+
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/elementwise_unary/list.h b/src/cpu/kernels/elementwise_unary/list.h
index 2a41b74c51..04c3bb6bcb 100644
--- a/src/cpu/kernels/elementwise_unary/list.h
+++ b/src/cpu/kernels/elementwise_unary/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,17 +32,19 @@ namespace arm_compute
namespace cpu
{
#define DECLARE_ELEMETWISE_UNARY_KERNEL(func_name) \
- void func_name(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op)
+ void func_name(const ITensor *in, ITensor *out, const Window &window, ElementWiseUnary op, const uint8_t *lut)
DECLARE_ELEMETWISE_UNARY_KERNEL(sve_fp32_elementwise_unary);
DECLARE_ELEMETWISE_UNARY_KERNEL(sve_fp16_elementwise_unary);
DECLARE_ELEMETWISE_UNARY_KERNEL(sve_s32_elementwise_unary);
+DECLARE_ELEMETWISE_UNARY_KERNEL(sve_q8_elementwise_unary);
DECLARE_ELEMETWISE_UNARY_KERNEL(neon_fp32_elementwise_unary);
DECLARE_ELEMETWISE_UNARY_KERNEL(neon_fp16_elementwise_unary);
DECLARE_ELEMETWISE_UNARY_KERNEL(neon_s32_elementwise_unary);
+DECLARE_ELEMETWISE_UNARY_KERNEL(neon_q8_elementwise_unary);
#undef DECLARE_ELEMETWISE_UNARY_KERNEL
} // namespace cpu
} // namespace arm_compute
-#endif // SRC_CORE_KERNELS_ELEMETWISE_UNARY_LIST_H
\ No newline at end of file
+#endif // SRC_CORE_KERNELS_ELEMETWISE_UNARY_LIST_H
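
Every kernel now receives the optional table through one common prototype, kept in sync by the (historically misspelled) DECLARE_ELEMETWISE_UNARY_KERNEL macro. For instance, the new NEON entry expands to:

    // Expansion of DECLARE_ELEMETWISE_UNARY_KERNEL(neon_q8_elementwise_unary):
    void neon_q8_elementwise_unary(const ITensor *in, ITensor *out, const Window &window,
                                   ElementWiseUnary op, const uint8_t *lut);
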
diff --git a/src/cpu/kernels/lut/generic/neon/u8.cpp b/src/cpu/kernels/lut/generic/neon/u8.cpp
new file mode 100644
index 0000000000..8ab647bfee
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/neon/u8.cpp
@@ -0,0 +1,410 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/lut/list.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+
+void lut_u8_neon(
+ const uint8_t *table,
+ size_t num_strings,
+ size_t string_length,
+ const uint8_t *const *input,
+ uint8_t *const *output)
+{
+ __asm__ __volatile__(
+ "ldr q16, [%x[table], #0x0]\n"
+ "ldr q17, [%x[table], #0x10]\n"
+ "mov x23, #0x0\n"
+ "ldr q18, [%x[table], #0x20]\n"
+ "ldr q19, [%x[table], #0x30]\n"
+ "ldr q20, [%x[table], #0x40]\n"
+ "ldr q21, [%x[table], #0x50]\n"
+ "ldr q22, [%x[table], #0x60]\n"
+ "ldr q23, [%x[table], #0x70]\n"
+ "ldr q24, [%x[table], #0x80]\n"
+ "ldr q25, [%x[table], #0x90]\n"
+ "ldr q26, [%x[table], #0xa0]\n"
+ "ldr q27, [%x[table], #0xb0]\n"
+ "ldr q28, [%x[table], #0xc0]\n"
+ "ldr q29, [%x[table], #0xd0]\n"
+ "ldr q30, [%x[table], #0xe0]\n"
+ "ldr q31, [%x[table], #0xf0]\n"
+ "1:" // string loop
+ "ldr x22, [%x[input], x23, LSL #0x3]\n"
+ "ldr x21, [%x[output], x23, LSL #0x3]\n"
+ "movi v11.16b, #0x40\n"
+ "movi v10.16b, #0x80\n"
+ "movi v9.16b, #0xc0\n"
+ "mov x20, %x[string_length]\n"
+ "2:" // 4 rounds: width loop
+ "cmp x20, #0x30\n"
+ "bge 27f\n"
+ "tbz x20, #5, 10f\n"
+ "ld1 { v8.16b }, [x22], #0x10\n"
+ "ld1 { v13.16b }, [x22], #0x10\n"
+ "tbz x20, #3, 6f\n"
+ "ldr d12, [x22], #0x8\n"
+ "tbz x20, #2, 4f\n"
+ "ld1 { v12.s }[2], [x22], #0x4\n"
+ "tbz x20, #1, 3f\n"
+ "ld1 { v12.h }[6], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[14], [x22]\n"
+ "b 26f\n"
+ "3:" // 4 rounds: Partial load: partial_1_44
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[12], [x22]\n"
+ "b 26f\n"
+ "4:" // 4 rounds: Partial load: partial_2_40
+ "tbz x20, #1, 5f\n"
+ "ld1 { v12.h }[4], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[10], [x22]\n"
+ "b 26f\n"
+ "5:" // 4 rounds: Partial load: partial_1_40
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[8], [x22]\n"
+ "b 26f\n"
+ "6:" // 4 rounds: Partial load: partial_4_32
+ "tbz x20, #2, 8f\n"
+ "ldr s12, [x22], #0x4\n"
+ "tbz x20, #1, 7f\n"
+ "ld1 { v12.h }[2], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[6], [x22]\n"
+ "b 26f\n"
+ "7:" // 4 rounds: Partial load: partial_1_36
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[4], [x22]\n"
+ "b 26f\n"
+ "8:" // 4 rounds: Partial load: partial_2_32
+ "tbz x20, #1, 9f\n"
+ "ldr h12, [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v12.b }[2], [x22]\n"
+ "b 26f\n"
+ "9:" // 4 rounds: Partial load: partial_1_32
+ "tbz x20, #0, 26f\n"
+ "ldr b12, [x22, #0x0]\n"
+ "b 26f\n"
+ "10:" // 4 rounds: Partial load: partial_16_0
+ "tbz x20, #4, 18f\n"
+ "ld1 { v8.16b }, [x22], #0x10\n"
+ "tbz x20, #3, 14f\n"
+ "ldr d13, [x22], #0x8\n"
+ "tbz x20, #2, 12f\n"
+ "ld1 { v13.s }[2], [x22], #0x4\n"
+ "tbz x20, #1, 11f\n"
+ "ld1 { v13.h }[6], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[14], [x22]\n"
+ "b 26f\n"
+ "11:" // 4 rounds: Partial load: partial_1_28
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[12], [x22]\n"
+ "b 26f\n"
+ "12:" // 4 rounds: Partial load: partial_2_24
+ "tbz x20, #1, 13f\n"
+ "ld1 { v13.h }[4], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[10], [x22]\n"
+ "b 26f\n"
+ "13:" // 4 rounds: Partial load: partial_1_24
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[8], [x22]\n"
+ "b 26f\n"
+ "14:" // 4 rounds: Partial load: partial_4_16
+ "tbz x20, #2, 16f\n"
+ "ldr s13, [x22], #0x4\n"
+ "tbz x20, #1, 15f\n"
+ "ld1 { v13.h }[2], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[6], [x22]\n"
+ "b 26f\n"
+ "15:" // 4 rounds: Partial load: partial_1_20
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[4], [x22]\n"
+ "b 26f\n"
+ "16:" // 4 rounds: Partial load: partial_2_16
+ "tbz x20, #1, 17f\n"
+ "ldr h13, [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v13.b }[2], [x22]\n"
+ "b 26f\n"
+ "17:" // 4 rounds: Partial load: partial_1_16
+ "tbz x20, #0, 26f\n"
+ "ldr b13, [x22, #0x0]\n"
+ "b 26f\n"
+ "18:" // 4 rounds: Partial load: partial_8_0
+ "tbz x20, #3, 22f\n"
+ "ldr d8, [x22], #0x8\n"
+ "tbz x20, #2, 20f\n"
+ "ld1 { v8.s }[2], [x22], #0x4\n"
+ "tbz x20, #1, 19f\n"
+ "ld1 { v8.h }[6], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[14], [x22]\n"
+ "b 26f\n"
+ "19:" // 4 rounds: Partial load: partial_1_12
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[12], [x22]\n"
+ "b 26f\n"
+ "20:" // 4 rounds: Partial load: partial_2_8
+ "tbz x20, #1, 21f\n"
+ "ld1 { v8.h }[4], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[10], [x22]\n"
+ "b 26f\n"
+ "21:" // 4 rounds: Partial load: partial_1_8
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[8], [x22]\n"
+ "b 26f\n"
+ "22:" // 4 rounds: Partial load: partial_4_0
+ "tbz x20, #2, 24f\n"
+ "ldr s8, [x22], #0x4\n"
+ "tbz x20, #1, 23f\n"
+ "ld1 { v8.h }[2], [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[6], [x22]\n"
+ "b 26f\n"
+ "23:" // 4 rounds: Partial load: partial_1_4
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[4], [x22]\n"
+ "b 26f\n"
+ "24:" // 4 rounds: Partial load: partial_2_0
+ "tbz x20, #1, 25f\n"
+ "ldr h8, [x22], #0x2\n"
+ "tbz x20, #0, 26f\n"
+ "ld1 { v8.b }[2], [x22]\n"
+ "b 26f\n"
+ "25:" // 4 rounds: Partial load: partial_1_0
+ "ldr b8, [x22, #0x0]\n"
+ "26:" // 4 rounds: Partial load: Done
+ "b 28f\n"
+ "27:" // 4 rounds: Full load
+ "ldr q8, [x22, #0x0]\n"
+ "ldr q13, [x22, #0x10]\n"
+ "ldr q12, [x22, #0x20]\n"
+ "add x22, x22, #0x30\n"
+ "28:" // 4 rounds: Load done
+ "sub v0.16b, v8.16b, v11.16b\n"
+ "sub v7.16b, v8.16b, v10.16b\n"
+ "tbl v0.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v0.16b\n"
+ "sub v6.16b, v8.16b, v9.16b\n"
+ "sub v5.16b, v13.16b, v11.16b\n"
+ "tbl v8.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v8.16b\n"
+ "sub v4.16b, v13.16b, v10.16b\n"
+ "sub v3.16b, v13.16b, v9.16b\n"
+ "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
+ "sub v2.16b, v12.16b, v11.16b\n"
+ "sub v1.16b, v12.16b, v10.16b\n"
+ "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
+ "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
+ "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
+ "orr v8.16b, v8.16b, v0.16b\n"
+ "sub v0.16b, v12.16b, v9.16b\n"
+ "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
+ "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
+ "tbl v12.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v12.16b\n"
+ "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
+ "orr v7.16b, v7.16b, v6.16b\n"
+ "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
+ "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
+ "orr v13.16b, v13.16b, v5.16b\n"
+ "orr v4.16b, v4.16b, v3.16b\n"
+ "orr v12.16b, v12.16b, v2.16b\n"
+ "cmp x20, #0x30\n"
+ "orr v1.16b, v1.16b, v0.16b\n"
+ "orr v8.16b, v8.16b, v7.16b\n"
+ "orr v13.16b, v13.16b, v4.16b\n"
+ "orr v12.16b, v12.16b, v1.16b\n"
+ "bge 53f\n"
+ "tbz x20, #5, 36f\n"
+ "st1 { v8.16b }, [x21], #0x10\n"
+ "st1 { v13.16b }, [x21], #0x10\n"
+ "tbz x20, #3, 32f\n"
+ "str d12, [x21], #0x8\n"
+ "tbz x20, #2, 30f\n"
+ "st1 { v12.s }[2], [x21], #0x4\n"
+ "tbz x20, #1, 29f\n"
+ "st1 { v12.h }[6], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[14], [x21]\n"
+ "b 52f\n"
+ "29:" // 4 rounds: Partial writeback: partial_1_44
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[12], [x21]\n"
+ "b 52f\n"
+ "30:" // 4 rounds: Partial writeback: partial_2_40
+ "tbz x20, #1, 31f\n"
+ "st1 { v12.h }[4], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[10], [x21]\n"
+ "b 52f\n"
+ "31:" // 4 rounds: Partial writeback: partial_1_40
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[8], [x21]\n"
+ "b 52f\n"
+ "32:" // 4 rounds: Partial writeback: partial_4_32
+ "tbz x20, #2, 34f\n"
+ "str s12, [x21], #0x4\n"
+ "tbz x20, #1, 33f\n"
+ "st1 { v12.h }[2], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[6], [x21]\n"
+ "b 52f\n"
+ "33:" // 4 rounds: Partial writeback: partial_1_36
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[4], [x21]\n"
+ "b 52f\n"
+ "34:" // 4 rounds: Partial writeback: partial_2_32
+ "tbz x20, #1, 35f\n"
+ "str h12, [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v12.b }[2], [x21]\n"
+ "b 52f\n"
+ "35:" // 4 rounds: Partial writeback: partial_1_32
+ "tbz x20, #0, 52f\n"
+ "str b12, [x21, #0x0]\n"
+ "b 52f\n"
+ "36:" // 4 rounds: Partial writeback: partial_16_0
+ "tbz x20, #4, 44f\n"
+ "st1 { v8.16b }, [x21], #0x10\n"
+ "tbz x20, #3, 40f\n"
+ "str d13, [x21], #0x8\n"
+ "tbz x20, #2, 38f\n"
+ "st1 { v13.s }[2], [x21], #0x4\n"
+ "tbz x20, #1, 37f\n"
+ "st1 { v13.h }[6], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[14], [x21]\n"
+ "b 52f\n"
+ "37:" // 4 rounds: Partial writeback: partial_1_28
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[12], [x21]\n"
+ "b 52f\n"
+ "38:" // 4 rounds: Partial writeback: partial_2_24
+ "tbz x20, #1, 39f\n"
+ "st1 { v13.h }[4], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[10], [x21]\n"
+ "b 52f\n"
+ "39:" // 4 rounds: Partial writeback: partial_1_24
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[8], [x21]\n"
+ "b 52f\n"
+ "40:" // 4 rounds: Partial writeback: partial_4_16
+ "tbz x20, #2, 42f\n"
+ "str s13, [x21], #0x4\n"
+ "tbz x20, #1, 41f\n"
+ "st1 { v13.h }[2], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[6], [x21]\n"
+ "b 52f\n"
+ "41:" // 4 rounds: Partial writeback: partial_1_20
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[4], [x21]\n"
+ "b 52f\n"
+ "42:" // 4 rounds: Partial writeback: partial_2_16
+ "tbz x20, #1, 43f\n"
+ "str h13, [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v13.b }[2], [x21]\n"
+ "b 52f\n"
+ "43:" // 4 rounds: Partial writeback: partial_1_16
+ "tbz x20, #0, 52f\n"
+ "str b13, [x21, #0x0]\n"
+ "b 52f\n"
+ "44:" // 4 rounds: Partial writeback: partial_8_0
+ "tbz x20, #3, 48f\n"
+ "str d8, [x21], #0x8\n"
+ "tbz x20, #2, 46f\n"
+ "st1 { v8.s }[2], [x21], #0x4\n"
+ "tbz x20, #1, 45f\n"
+ "st1 { v8.h }[6], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[14], [x21]\n"
+ "b 52f\n"
+ "45:" // 4 rounds: Partial writeback: partial_1_12
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[12], [x21]\n"
+ "b 52f\n"
+ "46:" // 4 rounds: Partial writeback: partial_2_8
+ "tbz x20, #1, 47f\n"
+ "st1 { v8.h }[4], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[10], [x21]\n"
+ "b 52f\n"
+ "47:" // 4 rounds: Partial writeback: partial_1_8
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[8], [x21]\n"
+ "b 52f\n"
+ "48:" // 4 rounds: Partial writeback: partial_4_0
+ "tbz x20, #2, 50f\n"
+ "str s8, [x21], #0x4\n"
+ "tbz x20, #1, 49f\n"
+ "st1 { v8.h }[2], [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[6], [x21]\n"
+ "b 52f\n"
+ "49:" // 4 rounds: Partial writeback: partial_1_4
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[4], [x21]\n"
+ "b 52f\n"
+ "50:" // 4 rounds: Partial writeback: partial_2_0
+ "tbz x20, #1, 51f\n"
+ "str h8, [x21], #0x2\n"
+ "tbz x20, #0, 52f\n"
+ "st1 { v8.b }[2], [x21]\n"
+ "b 52f\n"
+ "51:" // 4 rounds: Partial writeback: partial_1_0
+ "str b8, [x21, #0x0]\n"
+ "52:" // 4 rounds: Partial writeback: Done
+ "b 54f\n"
+ "53:" // 4 rounds: Full writeback
+ "str q8, [x21, #0x0]\n"
+ "str q13, [x21, #0x10]\n"
+ "str q12, [x21, #0x20]\n"
+ "add x21, x21, #0x30\n"
+ "54:" // 4 rounds: Writeback done
+ "subs x20, x20, #0x30\n"
+ "bgt 2b\n"
+ "add x23, x23, #0x1\n"
+ "cmp x23, %x[num_strings]\n"
+ "bne 1b\n"
+ :
+ : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23");
+}
+
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
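
The NEON routine pins the whole 256-byte table in v16-v31 and resolves each input vector with four TBL lookups, one per 64-byte quadrant; these quadrant passes are the "4 rounds" in the assembly labels. TBL returns zero for any index outside 0-63, so subtracting 0x40, 0x80 and 0xC0 (v11, v10, v9) before the second, third and fourth lookups means at most one lookup lands in range per byte, and the partial results can simply be ORed together. A rough intrinsics equivalent of one 16-byte pass (a sketch, not the tuned assembly; AArch64 only):

    #include <arm_neon.h>

    // tab[k] holds table bytes [64*k, 64*k + 63]; tbl lanes with an
    // out-of-range index are 0, so the four quadrant lookups combine with OR.
    uint8x16_t lut256_neon(const uint8x16x4_t tab[4], uint8x16_t idx)
    {
        const uint8x16_t r0 = vqtbl4q_u8(tab[0], idx);
        const uint8x16_t r1 = vqtbl4q_u8(tab[1], vsubq_u8(idx, vdupq_n_u8(0x40)));
        const uint8x16_t r2 = vqtbl4q_u8(tab[2], vsubq_u8(idx, vdupq_n_u8(0x80)));
        const uint8x16_t r3 = vqtbl4q_u8(tab[3], vsubq_u8(idx, vdupq_n_u8(0xC0)));
        return vorrq_u8(vorrq_u8(r0, r1), vorrq_u8(r2, r3));
    }
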
diff --git a/src/cpu/kernels/lut/generic/sve/u8.cpp b/src/cpu/kernels/lut/generic/sve/u8.cpp
new file mode 100644
index 0000000000..70f3a2e6fb
--- /dev/null
+++ b/src/cpu/kernels/lut/generic/sve/u8.cpp
@@ -0,0 +1,647 @@
+/*
+ * Copyright (c) 2022-2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/lut/list.h"
+
+#ifdef __aarch64__
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+void lut_u8_sve(
+ const uint8_t *table,
+ size_t num_strings,
+ size_t string_length,
+ const uint8_t *const *input,
+ uint8_t *const *output)
+{
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "cntd x25\n"
+ "addvl %x[table], %x[table], #8\n"
+ "ld1b { z16.b }, p0/Z, [%x[table], #-8, MUL VL]\n"
+ "tbnz x25, #5, 1f\n"
+ "ld1b { z17.b }, p0/Z, [%x[table], #-7, MUL VL]\n"
+ "tbnz x25, #4, 1f\n"
+ "ld1b { z18.b }, p0/Z, [%x[table], #-6, MUL VL]\n"
+ "ld1b { z19.b }, p0/Z, [%x[table], #-5, MUL VL]\n"
+ "tbnz x25, #3, 1f\n"
+ "ld1b { z20.b }, p0/Z, [%x[table], #-4, MUL VL]\n"
+ "ld1b { z21.b }, p0/Z, [%x[table], #-3, MUL VL]\n"
+ "ld1b { z22.b }, p0/Z, [%x[table], #-2, MUL VL]\n"
+ "ld1b { z23.b }, p0/Z, [%x[table], #-1, MUL VL]\n"
+ "tbnz x25, #2, 1f\n"
+ "ld1b { z24.b }, p0/Z, [%x[table]]\n"
+ "ld1b { z25.b }, p0/Z, [%x[table], #1, MUL VL]\n"
+ "ld1b { z26.b }, p0/Z, [%x[table], #2, MUL VL]\n"
+ "ld1b { z27.b }, p0/Z, [%x[table], #3, MUL VL]\n"
+ "ld1b { z28.b }, p0/Z, [%x[table], #4, MUL VL]\n"
+ "ld1b { z29.b }, p0/Z, [%x[table], #5, MUL VL]\n"
+ "ld1b { z30.b }, p0/Z, [%x[table], #6, MUL VL]\n"
+ "ld1b { z31.b }, p0/Z, [%x[table], #7, MUL VL]\n"
+ "1:" // Table load done
+ "mov x24, #0x0\n"
+ "2:" // string loop
+ "ldr x23, [%x[input], x24, LSL #0x3]\n"
+ "ldr x22, [%x[output], x24, LSL #0x3]\n"
+ "tbnz x25, #5, 14f\n"
+ "tbnz x25, #4, 11f\n"
+ "tbnz x25, #3, 8f\n"
+ "tbnz x25, #2, 5f\n"
+ "mov z12.b, #0x10\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "3:" // 16 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 4f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "4:" // 16 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f09 // tbx z9.b, z24.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f04 // tbx z4.b, z24.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f03 // tbx z3.b, z24.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f02 // tbx z2.b, z24.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f01 // tbx z1.b, z24.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f00 // tbx z0.b, z24.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f29 // tbx z9.b, z25.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f24 // tbx z4.b, z25.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f23 // tbx z3.b, z25.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f22 // tbx z2.b, z25.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f21 // tbx z1.b, z25.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f20 // tbx z0.b, z25.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f49 // tbx z9.b, z26.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f44 // tbx z4.b, z26.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f43 // tbx z3.b, z26.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f42 // tbx z2.b, z26.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f41 // tbx z1.b, z26.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f40 // tbx z0.b, z26.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f69 // tbx z9.b, z27.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f64 // tbx z4.b, z27.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f63 // tbx z3.b, z27.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f62 // tbx z2.b, z27.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f61 // tbx z1.b, z27.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f60 // tbx z0.b, z27.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2f89 // tbx z9.b, z28.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2f84 // tbx z4.b, z28.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282f83 // tbx z3.b, z28.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272f82 // tbx z2.b, z28.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262f81 // tbx z1.b, z28.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252f80 // tbx z0.b, z28.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2fa9 // tbx z9.b, z29.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2fa4 // tbx z4.b, z29.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282fa3 // tbx z3.b, z29.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272fa2 // tbx z2.b, z29.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262fa1 // tbx z1.b, z29.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252fa0 // tbx z0.b, z29.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "addvl x21, x21, #-6\n"
+ ".inst 0x052b2fc9 // tbx z9.b, z30.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2fc4 // tbx z4.b, z30.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282fc3 // tbx z3.b, z30.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272fc2 // tbx z2.b, z30.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262fc1 // tbx z1.b, z30.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252fc0 // tbx z0.b, z30.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2fe9 // tbx z9.b, z31.b, z11.b\n"
+ ".inst 0x052a2fe4 // tbx z4.b, z31.b, z10.b\n"
+ ".inst 0x05282fe3 // tbx z3.b, z31.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272fe2 // tbx z2.b, z31.b, z7.b\n"
+ ".inst 0x05262fe1 // tbx z1.b, z31.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252fe0 // tbx z0.b, z31.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 3b\n"
+ "b 17f\n"
+ "5:" // 256 bits
+ "mov z12.b, #0x20\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "6:" // 8 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 7f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "7:" // 8 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e89 // tbx z9.b, z20.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e84 // tbx z4.b, z20.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e83 // tbx z3.b, z20.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e82 // tbx z2.b, z20.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e81 // tbx z1.b, z20.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e80 // tbx z0.b, z20.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2ea9 // tbx z9.b, z21.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ea4 // tbx z4.b, z21.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ea3 // tbx z3.b, z21.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ea2 // tbx z2.b, z21.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ea1 // tbx z1.b, z21.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ea0 // tbx z0.b, z21.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "addvl x21, x21, #-6\n"
+ ".inst 0x052b2ec9 // tbx z9.b, z22.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2ec4 // tbx z4.b, z22.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282ec3 // tbx z3.b, z22.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272ec2 // tbx z2.b, z22.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262ec1 // tbx z1.b, z22.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252ec0 // tbx z0.b, z22.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2ee9 // tbx z9.b, z23.b, z11.b\n"
+ ".inst 0x052a2ee4 // tbx z4.b, z23.b, z10.b\n"
+ ".inst 0x05282ee3 // tbx z3.b, z23.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272ee2 // tbx z2.b, z23.b, z7.b\n"
+ ".inst 0x05262ee1 // tbx z1.b, z23.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252ee0 // tbx z0.b, z23.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 6b\n"
+ "b 17f\n"
+ "8:" // 512 bits
+ "mov z12.b, #0x40\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "9:" // 4 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 10f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "10:" // 4 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "addvl x21, x21, #-6\n"
+ ".inst 0x052b2e49 // tbx z9.b, z18.b, z11.b\n"
+ "sub z11.b, z11.b, z12.b\n"
+ ".inst 0x052a2e44 // tbx z4.b, z18.b, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ ".inst 0x05282e43 // tbx z3.b, z18.b, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ ".inst 0x05272e42 // tbx z2.b, z18.b, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ ".inst 0x05262e41 // tbx z1.b, z18.b, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ ".inst 0x05252e40 // tbx z0.b, z18.b, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2e69 // tbx z9.b, z19.b, z11.b\n"
+ ".inst 0x052a2e64 // tbx z4.b, z19.b, z10.b\n"
+ ".inst 0x05282e63 // tbx z3.b, z19.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272e62 // tbx z2.b, z19.b, z7.b\n"
+ ".inst 0x05262e61 // tbx z1.b, z19.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252e60 // tbx z0.b, z19.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 9b\n"
+ "b 17f\n"
+ "11:" // 1024 bits
+ "mov z12.b, #0x80\n"
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "12:" // 2 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 13f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "13:" // 2 rounds: predicate OK
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "addvl x21, x21, #-6\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "sub z11.b, z11.b, z12.b\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "sub z10.b, z10.b, z12.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "sub z8.b, z8.b, z12.b\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "sub z7.b, z7.b, z12.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "sub z6.b, z6.b, z12.b\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "sub z5.b, z5.b, z12.b\n"
+ "cmp x21, XZR\n"
+ ".inst 0x052b2e29 // tbx z9.b, z17.b, z11.b\n"
+ ".inst 0x052a2e24 // tbx z4.b, z17.b, z10.b\n"
+ ".inst 0x05282e23 // tbx z3.b, z17.b, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ ".inst 0x05272e22 // tbx z2.b, z17.b, z7.b\n"
+ ".inst 0x05262e21 // tbx z1.b, z17.b, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ ".inst 0x05252e20 // tbx z0.b, z17.b, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 12b\n"
+ "b 17f\n"
+ "14:" // 2048 bits
+ "mov x21, %x[string_length]\n"
+ "ptrue p5.b\n"
+ "ptrue p4.b\n"
+ "ptrue p3.b\n"
+ "ptrue p2.b\n"
+ "ptrue p1.b\n"
+ "ptrue p0.b\n"
+ "15:" // 1 rounds: width loop
+ "addvl x20, x21, #-6\n"
+ "cmp x20, XZR\n"
+ "bge 16f\n"
+ "mov x20, #0x0\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p5.b, XZR, x21\n"
+ "whilelt p4.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p3.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p2.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p1.b, x20, x21\n"
+ "addvl x20, x20, #1\n"
+ "whilelt p0.b, x20, x21\n"
+ "16:" // 1 rounds: predicate OK
+ "addvl x21, x21, #-6\n"
+ "ld1b { z11.b }, p5/Z, [x23]\n"
+ "ld1b { z10.b }, p4/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z8.b }, p3/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "cmp x21, XZR\n"
+ "ld1b { z6.b }, p1/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x23, #5, MUL VL]\n"
+ "tbl z9.b, { z16.b }, z11.b\n"
+ "tbl z4.b, { z16.b }, z10.b\n"
+ "tbl z3.b, { z16.b }, z8.b\n"
+ "st1b { z9.b }, p5, [x22]\n"
+ "tbl z2.b, { z16.b }, z7.b\n"
+ "tbl z1.b, { z16.b }, z6.b\n"
+ "st1b { z4.b }, p4, [x22, #1, MUL VL]\n"
+ "tbl z0.b, { z16.b }, z5.b\n"
+ "st1b { z3.b }, p3, [x22, #2, MUL VL]\n"
+ "addvl x23, x23, #6\n"
+ "st1b { z2.b }, p2, [x22, #3, MUL VL]\n"
+ "st1b { z1.b }, p1, [x22, #4, MUL VL]\n"
+ "st1b { z0.b }, p0, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
+ "bgt 15b\n"
+ "17:" // SVE body done
+ "add x24, x24, #0x1\n"
+ "cmp x24, %x[num_strings]\n"
+ "bne 2b\n"
+ : [table] "+&r"(table)
+ : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x20", "x21", "x22", "x23", "x24", "x25", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31");
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // __aarch64__
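
The SVE variant sizes the lookup to the vector length at run time: `cntd` in x25 gives the doubleword count per vector, and the `tbnz` ladder selects 16/8/4/2/1 table chunks (z16 up to z31) for VL = 128 up to 2048 bits. Round 0 is a zeroing `tbl`; the remaining rounds are merging `tbx` lookups (emitted as raw `.inst` words, presumably so the file assembles on toolchains without the mnemonic) with the index reduced by one vector length per round, so an out-of-range index leaves the accumulated result untouched. A scalar model of one byte, under that chunking reading:

    #include <cstddef>
    #include <cstdint>

    // vl = vector length in bytes (16..256). Exactly one round can hit:
    // i = idx - r*vl falls in [0, vl) for a single r; elsewhere the unsigned
    // subtraction wraps and the merging TBX keeps the previous result.
    uint8_t lut256_sve_model(const uint8_t *table, size_t vl, uint8_t idx)
    {
        const size_t rounds = 256 / vl; // 16, 8, 4, 2 or 1
        uint8_t      result = 0;
        size_t       i      = idx;
        for(size_t r = 0; r < rounds; ++r, i -= vl)
        {
            if(i < vl)
            {
                result = table[r * vl + i];
            }
        }
        return result;
    }
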
diff --git a/src/cpu/kernels/lut/list.h b/src/cpu/kernels/lut/list.h
new file mode 100644
index 0000000000..9749b91cfe
--- /dev/null
+++ b/src/cpu/kernels/lut/list.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SRC_CORE_NEON_KERNELS_LUT_LIST_H
+#define SRC_CORE_NEON_KERNELS_LUT_LIST_H
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#ifdef __aarch64__
+#define DECLARE_LUT_KERNEL(func_name) \
+ void func_name( \
+ const uint8_t *table, \
+ size_t num_strings, \
+ size_t string_length, \
+ const uint8_t *const *input, \
+ uint8_t *const *output)
+
+DECLARE_LUT_KERNEL(lut_u8_neon);
+DECLARE_LUT_KERNEL(lut_u8_sve);
+
+#undef DECLARE_LUT_KERNEL
+#endif // __aarch64__
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // SRC_CORE_NEON_KERNELS_LUT_LIST_H
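
Both kernels share a "strings" interface: `input` and `output` are arrays of `num_strings` row pointers, each row `string_length` bytes long, which is why the q8 wrappers above pass the address of a single row pointer with `num_strings` of 1. A usage sketch under that reading (buffer names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include "src/cpu/kernels/lut/list.h"

    void apply_lut(const uint8_t *lut, const uint8_t *src, uint8_t *dst, size_t n)
    {
        const uint8_t *in_ptrs[1]  = { src };
        uint8_t       *out_ptrs[1] = { dst };
        arm_compute::cpu::lut_u8_neon(lut, /* num_strings */ 1, /* string_length */ n,
                                      in_ptrs, out_ptrs);
    }
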
diff --git a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
index ccde670034..7f6a6a5bb2 100644
--- a/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
+++ b/tests/validation/NEON/ElementwiseAbsoluteValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
@@ -53,6 +55,9 @@ TEST_SUITE(AbsLayer)
template <typename T>
using NEAbsLayerFixture = AbsValidationFixture<Tensor, Accessor, NEAbsLayer, T>;
+template <typename T>
+using NEAbsLayerQuantizedFixture = AbsQuantizedValidationFixture<Tensor, Accessor, NEAbsLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -107,6 +112,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEAbsLayerFixture<int32_t>, framework::DatasetM
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEAbsLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // AbsLayer
TEST_SUITE_END() // Neon
} // namespace validation
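
The quantized suites pin distinct input and output quantizations and demand bit-exact results (tolerance 0), which the LUT path can honour because all 256 input codes are enumerated up front. A worked instance of the QASYMM8 Abs case above, assuming round-to-nearest requantization for illustration:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main()
    {
        // Input QuantizationInfo(0.2, -3), output QuantizationInfo(0.5, 10).
        const uint8_t q_in  = 7;
        const float   x     = 0.2f * (static_cast<int32_t>(q_in) - (-3)); // dequantize -> 2.0f
        const uint8_t q_out = static_cast<uint8_t>(std::lround(std::fabs(x) / 0.5f) + 10);
        assert(q_out == 14); // |2.0| requantized at scale 0.5, offset 10
        return 0;
    }
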
diff --git a/tests/validation/NEON/ElementwiseExpLayer.cpp b/tests/validation/NEON/ElementwiseExpLayer.cpp
index f9e5f39989..e8940c5385 100644
--- a/tests/validation/NEON/ElementwiseExpLayer.cpp
+++ b/tests/validation/NEON/ElementwiseExpLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(ExpLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(ExpLayer)
template <typename T>
using NEExpLayerFixture = ExpValidationFixture<Tensor, Accessor, NEExpLayer, T>;
+template <typename T>
+using NEExpLayerQuantizedFixture = ExpQuantizedValidationFixture<Tensor, Accessor, NEExpLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -82,6 +87,33 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerFixture<float>, framework::DatasetMod
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.01, 0) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.003, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEExpLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.02, -1) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.002, -2) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
+
TEST_SUITE_END() // ExpLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseLog.cpp b/tests/validation/NEON/ElementwiseLog.cpp
index 3aa7fb3665..49a88ced1c 100644
--- a/tests/validation/NEON/ElementwiseLog.cpp
+++ b/tests/validation/NEON/ElementwiseLog.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(LogLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(LogLayer)
template <typename T>
using NELogLayerFixture = LogValidationFixture<Tensor, Accessor, NELogLayer, T>;
+template <typename T>
+using NELogLayerQuantizedFixture = LogQuantizedValidationFixture<Tensor, Accessor, NELogLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -88,6 +93,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NELogLayerFixture<float>, framework::DatasetMod
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NELogLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(10.5, 0) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NELogLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.75, -128) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(12.5, -2) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // LogLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseNegation.cpp b/tests/validation/NEON/ElementwiseNegation.cpp
index 0b63588d8a..038058c70c 100644
--- a/tests/validation/NEON/ElementwiseNegation.cpp
+++ b/tests/validation/NEON/ElementwiseNegation.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(NegLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(NegLayer)
template <typename T>
using NENegLayerFixture = NegValidationInPlaceFixture<Tensor, Accessor, NENegLayer, T>;
+template <typename T>
+using NENegLayerQuantizedFixture = NegQuantizedValidationFixture<Tensor, Accessor, NENegLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -113,6 +118,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NENegLayerFixture<int32_t>, framework::DatasetM
TEST_SUITE_END() // S32
TEST_SUITE_END() // Integer
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NENegLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // NegLayer
TEST_SUITE_END() // Neon
} // namespace validation
diff --git a/tests/validation/NEON/ElementwiseRound.cpp b/tests/validation/NEON/ElementwiseRound.cpp
index d2f0b456a0..a6ff47c830 100644
--- a/tests/validation/NEON/ElementwiseRound.cpp
+++ b/tests/validation/NEON/ElementwiseRound.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,12 +40,20 @@ namespace test
{
namespace validation
{
+namespace
+{
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
+} // namespace
TEST_SUITE(NEON)
TEST_SUITE(RoundLayer)
template <typename T>
using NERoundLayerFixture = RoundValidationFixture<Tensor, Accessor, NERoundLayer, T>;
+template <typename T>
+using NERoundLayerQuantizedFixture = RoundQuantizedValidationFixture<Tensor, Accessor, NERoundLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -81,6 +89,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NERoundLayerFixture<float>, framework::DatasetM
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERoundLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERoundLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.075, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // RoundLayer
TEST_SUITE_END() // Neon
} // namespace validation
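
Note: a tolerance of 0 is realistic for an 8-bit unary operator because there are only 256 distinct inputs, so an implementation can precompute the exact quantized answer for each one. A sketch of that idea (illustrative only, not the library's kernel; make_table is a hypothetical helper), reusing the affine mapping assumed above:

#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>

std::array<uint8_t, 256> make_table(float in_scale, int in_offset,
                                    float out_scale, int out_offset,
                                    float (*op)(float))
{
    std::array<uint8_t, 256> table{};
    for(int q = 0; q < 256; ++q)
    {
        const float real = in_scale * (q - in_offset);                                      // dequantize
        const int   out  = static_cast<int>(std::round(op(real) / out_scale)) + out_offset; // requantize
        table[q]         = static_cast<uint8_t>(std::min(255, std::max(0, out)));           // saturate
    }
    return table;
}

For example, make_table(0.2f, -3, 0.5f, 10, [](float x) { return std::round(x); }) tabulates the round operator for the QASYMM8 case above; applying the operator to a tensor is then one lookup per element.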
diff --git a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp
index 2d52183b15..1d291ac6dc 100644
--- a/tests/validation/NEON/ElementwiseRsqrtLayer.cpp
+++ b/tests/validation/NEON/ElementwiseRsqrtLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ RelativeTolerance<float> tolerance_fp32(0.000001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
RelativeTolerance<float> tolerance_fp16(0.01f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(RsqrtLayer)
@@ -72,6 +74,9 @@ TEST_SUITE_END() // DynamicShape
template <typename T>
using NERsqrtLayerFixture = RsqrtValidationFixture<Tensor, Accessor, NERsqrtLayer, T>;
+template <typename T>
+using NERsqrtLayerQuantizedFixture = RsqrtQuantizedValidationFixture<Tensor, Accessor, NERsqrtLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -102,6 +107,32 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerFixture<float>, framework::DatasetM
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(20, 0) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.5, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NERsqrtLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(25, -128) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(0.1, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
TEST_SUITE_END() // RsqrtLayer
TEST_SUITE_END() // Neon
} // namespace validation
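
Note: the Rsqrt input QuantizationInfo values are chosen so every dequantized input is non-negative, which the operator requires. A quick check of the two extremes under the same affine mapping (standalone sketch, not part of the patch):

#include <cstdio>

int main()
{
    // QASYMM8, QuantizationInfo(20, 0): q in [0, 255]
    std::printf("u8 range: [%f, %f]\n", 20.0f * (0 - 0), 20.0f * (255 - 0));        // [0, 5100]
    // QASYMM8_SIGNED, QuantizationInfo(25, -128): q in [-128, 127]
    std::printf("s8 range: [%f, %f]\n", 25.0f * (-128 + 128), 25.0f * (127 + 128)); // [0, 6375]
    return 0;
}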
diff --git a/tests/validation/NEON/ElementwiseSin.cpp b/tests/validation/NEON/ElementwiseSin.cpp
index 06775c0690..76f4c50b46 100644
--- a/tests/validation/NEON/ElementwiseSin.cpp
+++ b/tests/validation/NEON/ElementwiseSin.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,8 @@ AbsoluteTolerance<float> tolerance_fp32(0.00001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
AbsoluteTolerance<float> tolerance_fp16(0.0005f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
+constexpr AbsoluteTolerance<int8_t> tolerance_qasymm8_signed(0);
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(SinLayer)
@@ -53,6 +55,9 @@ TEST_SUITE(SinLayer)
template <typename T>
using NESinLayerFixture = SinValidationFixture<Tensor, Accessor, NESinLayer, T>;
+template <typename T>
+using NESinLayerQuantizedFixture = SinQuantizedValidationFixture<Tensor, Accessor, NESinLayer, T>;
+
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
@@ -89,6 +94,33 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESinLayerFixture<float>, framework::DatasetMod
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.2, -3) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(200, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESinLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(
+ datasets::SmallShapes(),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("InputQInfo", { QuantizationInfo(0.07, 6) })),
+ framework::dataset::make("OutputQInfo", { QuantizationInfo(123, -7) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+
+TEST_SUITE_END() // Quantized
+
TEST_SUITE_END() // SinLayer
TEST_SUITE_END() // Neon
} // namespace validation
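
Note: the Sin output scales (200 and 123) are far larger than the operator's output range. Since sin(x) lies in [-1, 1], real / scale lies within [-0.005, 0.005] and rounds to 0, so every output requantizes to the offset and the exact-match tolerance is trivially satisfiable. A one-line check under the mapping assumed above (illustrative sketch):

#include <cmath>
#include <cstdio>

int main()
{
    const float out_scale = 200.0f; const int out_offset = 10;                 // OutputQInfo from the QASYMM8 test
    const int q = static_cast<int>(std::round(1.0f / out_scale)) + out_offset; // worst case |sin| == 1
    std::printf("q = %d\n", q);                                                // prints: q = 10 (the offset)
    return 0;
}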
diff --git a/tests/validation/fixtures/ElementwiseUnaryFixture.h b/tests/validation/fixtures/ElementwiseUnaryFixture.h
index 1dc4f03e99..9b40d34d2b 100644
--- a/tests/validation/fixtures/ElementwiseUnaryFixture.h
+++ b/tests/validation/fixtures/ElementwiseUnaryFixture.h
@@ -24,8 +24,10 @@
#ifndef ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
#define ARM_COMPUTE_TEST_ELEMENTWISE_UNARY_FIXTURE
+#include "arm_compute/core/QuantizationInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Utils.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
@@ -33,6 +35,11 @@
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ElementwiseUnary.h"
+#include <tuple>
+#include <limits>
+#include <type_traits>
+#include <vector>
+
namespace arm_compute
{
namespace test
@@ -64,67 +71,131 @@ protected:
{
case ElementWiseUnary::EXP:
{
- FloatDistributionType distribution{ FloatType(-1.0f), FloatType(1.0f) };
- library->fill(tensor, distribution, i);
+ switch(data_type)
+ {
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ FloatType(-86.63f), FloatType(88.36f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-9.00f), FloatType(10.73f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
+ }
+
break;
}
case ElementWiseUnary::RSQRT:
+ case ElementWiseUnary::LOG:
{
- if(data_type == DataType::F32 || data_type == DataType::F16)
+            // For floating-point data types, the chosen input range contains only strictly
+            // positive numbers (i.e. positive and negative zeros are excluded).
+ switch(data_type)
{
- FloatDistributionType distribution{ FloatType(1.0f), FloatType(2.0f) };
- library->fill(tensor, distribution, i);
+ case DataType::F32:
+ {
+ FloatDistributionType distribution{ std::numeric_limits<float>::min(), std::numeric_limits<float>::max() };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(0.00006103515625f), FloatType(65504.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
}
- else
+
+ break;
+ }
+ case ElementWiseUnary::SIN:
+ {
+ switch(data_type)
{
- library->fill_tensor_uniform(tensor, i);
+ case DataType::F32:
+ case DataType::F16:
+ {
+ FloatDistributionType distribution{ FloatType(-100.0f), FloatType(100.0f) };
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::S32:
+ {
+ std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
+ library->fill(tensor, distribution, i);
+ break;
+ }
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
+ default:
+ ARM_COMPUTE_ERROR("Not implemented");
}
+
break;
}
case ElementWiseUnary::ABS:
case ElementWiseUnary::NEG:
+ case ElementWiseUnary::ROUND:
{
switch(data_type)
{
- case DataType::F16:
+ case DataType::F32:
{
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -2.0f, 2.0f };
+ FloatDistributionType distribution{ std::numeric_limits<float>::lowest() / 2, std::numeric_limits<float>::max() / 2 };
library->fill(tensor, distribution, i);
break;
}
- case DataType::F32:
+
+ case DataType::F16:
{
- FloatDistributionType distribution{ FloatType(-2.0f), FloatType(2.0f) };
+ FloatDistributionType distribution{ FloatType(-65504.0f), FloatType(65504.0f) };
library->fill(tensor, distribution, i);
break;
}
+
case DataType::S32:
{
- std::uniform_int_distribution<int32_t> distribution(-100, 100);
+ std::uniform_int_distribution<int32_t> distribution(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
library->fill(tensor, distribution, i);
break;
}
+
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_SIGNED:
+ library->fill_tensor_uniform(tensor, i);
+ break;
+
default:
- ARM_COMPUTE_ERROR("DataType for Elementwise Negation Not implemented");
+ ARM_COMPUTE_ERROR("Not implemented");
}
- break;
- }
- case ElementWiseUnary::LOG:
- {
- FloatDistributionType distribution{ FloatType(0.0000001f), FloatType(100.0f) };
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::SIN:
- {
- FloatDistributionType distribution{ FloatType(-100.00f), FloatType(100.00f) };
- library->fill(tensor, distribution, i);
- break;
- }
- case ElementWiseUnary::ROUND:
- {
- FloatDistributionType distribution{ FloatType(100.0f), FloatType(-100.0f) };
- library->fill(tensor, distribution, i);
+
break;
}
default:
@@ -199,6 +270,8 @@ protected:
SimpleTensor<T> _reference{};
ElementWiseUnary _op{};
bool _use_dynamic_shape{ false };
+ QuantizationInfo _input_qinfo{};
+ QuantizationInfo _output_qinfo{};
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class RsqrtQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
@@ -245,6 +318,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ExpQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::EXP, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class NegValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -256,6 +340,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::NEG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class NegValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -267,6 +362,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class NegQuantizedValidationInPlaceFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, bool in_place, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, in_place, ElementWiseUnary::NEG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class LogValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -278,6 +384,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class LogQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::LOG, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class AbsValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -289,6 +406,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class AbsQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ABS, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class SinValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -300,6 +428,17 @@ public:
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SinQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::SIN, false, iq, oq);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class RoundValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -309,6 +448,17 @@ public:
ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND);
}
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class RoundQuantizedValidationFixture : public ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type, QuantizationInfo iq, QuantizationInfo oq)
+ {
+ ElementWiseUnaryValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, false, ElementWiseUnary::ROUND, false, iq, oq);
+ }
+};
} // namespace validation
} // namespace test
} // namespace arm_compute
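
Note: the widened EXP fill ranges track the window in which expf() stays finite and non-zero, roughly [ln(min normal), ln(max)] per precision; the fixture's [-86.63, 88.36] (F32) and [-9.00, 10.73] (F16) sit just inside it, presumably with some margin. A standalone sketch that prints the theoretical window for comparison:

#include <cfloat>
#include <cmath>
#include <cstdio>

int main()
{
    // F32: [ln(FLT_MIN), ln(FLT_MAX)] vs. the fixture's [-86.63, 88.36]
    std::printf("F32: [%f, %f]\n", std::log(FLT_MIN), std::log(FLT_MAX)); // ~[-87.34, 88.72]
    // F16: smallest normal 2^-14, largest finite 65504 vs. the fixture's [-9.00, 10.73]
    std::printf("F16: [%f, %f]\n", std::log(0x1p-14), std::log(65504.0)); // ~[-9.70, 11.09]
    return 0;
}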
diff --git a/tests/validation/reference/ElementwiseUnary.cpp b/tests/validation/reference/ElementwiseUnary.cpp
index d5218d772d..558f9d24fc 100644
--- a/tests/validation/reference/ElementwiseUnary.cpp
+++ b/tests/validation/reference/ElementwiseUnary.cpp
@@ -88,8 +88,21 @@ SimpleTensor<int8_t> elementwise_unary(const SimpleTensor<int8_t> &src, SimpleTe
dst_tmp[i] = (127.0f - dst.quantization_info().uniform().offset) * dst.quantization_info().uniform().scale ;
}
break;
+
+ case ElementWiseUnary::LOG:
+ if(src_tmp[i] != 0)
+ {
+ dst_tmp[i] = std::log(src_tmp[i]);
+ }
+ else
+ {
+                dst_tmp[i] = (-128.0f - dst.quantization_info().uniform().offset) * dst.quantization_info().uniform().scale;
+ }
+ break;
+
default:
- ARM_COMPUTE_ERROR("Not implemented");
+ elementwise_unary(src_tmp, dst_tmp, op);
+ break;
}
}
dst = convert_to_asymmetric<int8_t>(dst_tmp, dst.quantization_info());
@@ -122,8 +135,21 @@ SimpleTensor<uint8_t> elementwise_unary(const SimpleTensor<uint8_t> &src, Simple
dst_tmp[i] = (255.0f - dst.quantization_info().uniform().offset)* dst.quantization_info().uniform().scale;
}
break;
+
+ case ElementWiseUnary::LOG:
+ if(src_tmp[i] != 0)
+ {
+ dst_tmp[i] = std::log(src_tmp[i]);
+ }
+ else
+ {
+ dst_tmp[i] = -dst.quantization_info().uniform().offset * dst.quantization_info().uniform().scale;
+ }
+ break;
+
default:
- ARM_COMPUTE_ERROR("Not implemented");
+ elementwise_unary(src_tmp, dst_tmp, op);
+ break;
}
}
dst = convert_to_asymmetric<uint8_t>(dst_tmp, dst.quantization_info());
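
Note: log(0) is undefined, so the reference pins a zero input to the dequantized value of the lowest quantized level, dequant(q_min) = scale * (q_min - offset): (-128 - offset) * scale for QASYMM8_SIGNED and (0 - offset) * scale for QASYMM8, which convert_to_asymmetric() then maps back to q_min. A small standalone sketch with illustrative QuantizationInfo values taken from the tests in this patch:

#include <cstdio>

// Dequantized value of a quantized level under real = scale * (q - offset).
static float dequant(int q, float scale, int offset)
{
    return scale * static_cast<float>(q - offset);
}

int main()
{
    std::printf("s8 log(0) pin: %f\n", dequant(-128, 0.1f, -7)); // (-128 - offset) * scale = -12.1
    std::printf("u8 log(0) pin: %f\n", dequant(0, 0.5f, 10));    // (0 - offset) * scale    = -5.0
    return 0;
}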