author     Viet-Hoa Do <viet-hoa.do@arm.com>    2022-08-10 11:56:49 +0100
committer  Viet-Hoa Do <viet-hoa.do@arm.com>    2022-08-17 11:35:03 +0000
commit     29db3d293302254b80e82651895d997ca145142a (patch)
tree       606d1193e7f92623e0af97aea513106ec468bff0
parent     e54d8c07e75d70baeb80fecbb43088027ea45658 (diff)
download   ComputeLibrary-29db3d293302254b80e82651895d997ca145142a.tar.gz
Add LUT for quantized sigmoid function
* Move LUT implementation to a separate file. It will be used for both
  QASYMM8 and QASYMM8_SIGNED.
* Fix wrong constant value related to QASYMM8_SIGNED leaky ReLU in 32-bit build.

Resolves: COMPMID-5464
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I2b24d52409a38f1b66fd532f431eff8a9e4547b6
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8066
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
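For context, a minimal standalone sketch of the LUT idea this patch applies to the
quantized sigmoid: the table is filled once per configure by dequantizing each of the
256 possible input values, applying the activation, and requantizing, so the per-element
work at run time is a byte lookup. The scale/offset values below are hypothetical example
parameters, not values from this patch; the library's own code is in QuantizationInfo.h
and the new lut.cpp shown further down.

    #include <algorithm>
    #include <array>
    #include <cmath>
    #include <cstdint>

    int main()
    {
        // Hypothetical example quantization parameters (not taken from the patch).
        const float in_scale   = 0.05f;
        const int   in_offset  = 128;
        const float out_scale  = 1.0f / 256.0f;
        const int   out_offset = 0;

        // Fill a 256-entry table once: dequantize the index, apply sigmoid, requantize.
        std::array<uint8_t, 256> lut{};
        for(size_t i = 0; i < lut.size(); ++i)
        {
            const float x = in_scale * (static_cast<int>(i) - in_offset);
            const float y = 1.0f / (1.0f + std::exp(-x));
            const int   q = static_cast<int>(std::lround(y / out_scale)) + out_offset;
            lut[i]        = static_cast<uint8_t>(std::min(std::max(q, 0), 255));
        }

        // The activation then reduces to a per-byte lookup; the new lut.cpp performs the
        // same substitution in 48-byte blocks using TBL instructions on aarch64.
        const uint8_t in_value  = 200;
        const uint8_t out_value = lut[in_value];
        return out_value == lut[in_value] ? 0 : 1;
    }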
-rw-r--r--  Android.bp                                                     1
-rw-r--r--  arm_compute/core/QuantizationInfo.h                           20
-rw-r--r--  arm_compute/core/Types.h                                      43
-rw-r--r--  filelist.json                                                 10
-rw-r--r--  src/core/common/Registrars.h                                   6
-rw-r--r--  src/cpu/kernels/CpuActivationKernel.cpp                        6
-rw-r--r--  src/cpu/kernels/activation/generic/neon/lut.cpp              439
-rw-r--r--  src/cpu/kernels/activation/generic/neon/qasymm8.cpp          411
-rw-r--r--  src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp    10
-rw-r--r--  src/cpu/kernels/activation/list.h                              2
10 files changed, 530 insertions, 418 deletions
diff --git a/Android.bp b/Android.bp
index 4a6ba4f3ab..6f6c66cc55 100644
--- a/Android.bp
+++ b/Android.bp
@@ -447,6 +447,7 @@ cc_library_static {
"src/cpu/kernels/CpuWinogradConv2dKernel.cpp",
"src/cpu/kernels/activation/generic/neon/fp16.cpp",
"src/cpu/kernels/activation/generic/neon/fp32.cpp",
+ "src/cpu/kernels/activation/generic/neon/lut.cpp",
"src/cpu/kernels/activation/generic/neon/qasymm8.cpp",
"src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp",
"src/cpu/kernels/activation/generic/neon/qsymm16.cpp",
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index 21d962d08b..ed1025edc0 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -420,6 +420,26 @@ inline qasymm8_t qasymm8_leaky_relu(qasymm8_t in,
return tmp;
}
+inline qasymm8_t qasymm8_logistic(qasymm8_t in,
+ const UniformQuantizationInfo &qi_in,
+ const UniformQuantizationInfo &qi_out)
+{
+ float tmp_f = dequantize_qasymm8(in, qi_in);
+ tmp_f = 1.f / (1.f + std::exp(-tmp_f));
+ const qasymm8_t tmp = quantize_qasymm8(tmp_f, qi_out);
+ return tmp;
+}
+
+inline qasymm8_signed_t qasymm8_signed_logistic(qasymm8_signed_t in,
+ const UniformQuantizationInfo &qi_in,
+ const UniformQuantizationInfo &qi_out)
+{
+ float tmp_f = dequantize_qasymm8_signed(in, qi_in);
+ tmp_f = 1.f / (1.f + std::exp(-tmp_f));
+ const qasymm8_signed_t tmp = quantize_qasymm8_signed(tmp_f, qi_out);
+ return tmp;
+}
+
/** Dequantize a value given a 8-bit symmetric quantization scheme
*
* @param[in] value Value to dequantize
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 952c174194..1fad815a50 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1686,7 +1686,7 @@ public:
return _lut;
}
- void init_lut(const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out)
+ void init_lut(DataType data_type, const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out)
{
if(_act == ActivationFunction::HARD_SWISH)
{
@@ -1696,14 +1696,35 @@ public:
{
qasymm8_leaky_relu_populate_table(_lut, qi_in, qi_out, _a);
}
+ else if(_act == ActivationFunction::LOGISTIC)
+ {
+ if(data_type == DataType::QASYMM8)
+ {
+ qasymm8_logistic_populate_table(_lut, qi_in, qi_out);
+ }
+ else
+ {
+ qasymm8_signed_logistic_populate_table(_lut, qi_in, qi_out);
+ }
+ }
}
#endif // __aarch64__
static inline bool is_lut_supported(ActivationFunction act_func, DataType data_type)
{
#ifdef __aarch64__
- auto supported = (data_type == DataType::QASYMM8 && (act_func == ActivationFunction::HARD_SWISH || act_func == ActivationFunction::LEAKY_RELU));
- return supported;
+ switch(act_func)
+ {
+ case ActivationFunction::HARD_SWISH:
+ case ActivationFunction::LEAKY_RELU:
+ return data_type == DataType::QASYMM8;
+
+ case ActivationFunction::LOGISTIC:
+ return (data_type == DataType::QASYMM8) || (data_type == DataType::QASYMM8_SIGNED);
+
+ default:
+ return false;
+ }
#else // __aarch64__
ARM_COMPUTE_UNUSED(act_func);
ARM_COMPUTE_UNUSED(data_type);
@@ -1735,6 +1756,22 @@ private:
lut[i] = qasymm8_leaky_relu(i, qi_in, qi_out, alpha);
}
}
+
+ static inline void qasymm8_logistic_populate_table(LookupTable256 &lut, const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out)
+ {
+ for(size_t i = 0; i < lut.size(); ++i)
+ {
+ lut[i] = qasymm8_logistic(i, qi_in, qi_out);
+ }
+ }
+
+ static inline void qasymm8_signed_logistic_populate_table(LookupTable256 &lut, const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out)
+ {
+ for(size_t i = 0; i < lut.size(); ++i)
+ {
+ lut[i] = qasymm8_signed_logistic(static_cast<int8_t>(i), qi_in, qi_out);
+ }
+ }
#endif // __aarch64__
};
diff --git a/filelist.json b/filelist.json
index c5de028928..c218ed9129 100644
--- a/filelist.json
+++ b/filelist.json
@@ -855,8 +855,14 @@
"neon": {
"fp16": [ "src/cpu/kernels/activation/generic/neon/fp16.cpp" ],
"fp32": [ "src/cpu/kernels/activation/generic/neon/fp32.cpp" ],
- "qasymm8": [ "src/cpu/kernels/activation/generic/neon/qasymm8.cpp" ],
- "qasymm8_signed": [ "src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp" ],
+ "qasymm8": [
+ "src/cpu/kernels/activation/generic/neon/qasymm8.cpp",
+ "src/cpu/kernels/activation/generic/neon/lut.cpp"
+ ],
+ "qasymm8_signed": [
+ "src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp",
+ "src/cpu/kernels/activation/generic/neon/lut.cpp"
+ ],
"qsymm16": [ "src/cpu/kernels/activation/generic/neon/qsymm16.cpp" ]
},
"sve": {
diff --git a/src/core/common/Registrars.h b/src/core/common/Registrars.h
index 42c1aaa9fa..d6dc3449fc 100644
--- a/src/core/common/Registrars.h
+++ b/src/core/common/Registrars.h
@@ -141,6 +141,12 @@
#define REGISTER_QSYMM16_SVE2(func_name) nullptr
#endif /* defined(ENABLE_QSYMM16_KERNELS) */
+#if defined(ENABLE_QASYMM8_KERNELS) || defined(ENABLE_QASYMM8_SIGNED_KERNELS)
+#define REGISTER_Q8_NEON(func_name) &(func_name)
+#else /* !defined(ENABLE_QASYMM8_KERNELS) && !defined(ENABLE_QASYMM8_SIGNED_KERNELS) */
+#define REGISTER_Q8_NEON(func_name) nullptr
+#endif /* defined(ENABLE_QASYMM8_KERNELS) || defined(ENABLE_QASYMM8_SIGNED_KERNELS) */
+
#if defined(ENABLE_INTEGER_KERNELS)
#if defined(ARM_COMPUTE_ENABLE_SVE)
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index 9eaf44af51..ee9db99080 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -47,9 +47,9 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel
{
#ifdef __aarch64__
{ // Neon LUT implementantion takes precedence
- "neon_qu8_activation_lut",
+ "neon_q8_activation_lut",
[](const ActivationDataTypeISASelectorData & data) { return ActivationLayerInfo::is_lut_supported(data.f, data.dt); },
- REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation_lut)
+ REGISTER_Q8_NEON(arm_compute::cpu::neon_q8_activation_lut)
},
#endif // __aarch64__
{
@@ -193,7 +193,7 @@ void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Ac
#ifdef __aarch64__
if(ActivationLayerInfo::is_lut_supported(activation_info.activation(), src->data_type()))
{
- activation_info.init_lut(src->quantization_info().uniform(),(dst)?dst->quantization_info().uniform():src->quantization_info().uniform());
+ activation_info.init_lut(src->data_type(), src->quantization_info().uniform(), (dst)?dst->quantization_info().uniform():src->quantization_info().uniform());
}
#endif // __aarch64__
_act_info = activation_info;
diff --git a/src/cpu/kernels/activation/generic/neon/lut.cpp b/src/cpu/kernels/activation/generic/neon/lut.cpp
new file mode 100644
index 0000000000..ddf7f1bd61
--- /dev/null
+++ b/src/cpu/kernels/activation/generic/neon/lut.cpp
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/core/Helpers.h"
+
+#include <arm_neon.h>
+#include <cstdint>
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace
+{
+#ifdef __aarch64__
+
+void substitute_bytes_neon(
+ const uint8_t *table,
+ size_t num_strings,
+ size_t string_length,
+ const uint8_t *const *input,
+ uint8_t *const *output)
+{
+ __asm__ __volatile__(
+ "ldr q16, [%x[table], #0x0]\n"
+ "ldr q17, [%x[table], #0x10]\n"
+ "mov x22, #0x0\n"
+ "ldr q18, [%x[table], #0x20]\n"
+ "ldr q19, [%x[table], #0x30]\n"
+ "ldr q20, [%x[table], #0x40]\n"
+ "ldr q21, [%x[table], #0x50]\n"
+ "ldr q22, [%x[table], #0x60]\n"
+ "ldr q23, [%x[table], #0x70]\n"
+ "ldr q24, [%x[table], #0x80]\n"
+ "ldr q25, [%x[table], #0x90]\n"
+ "ldr q26, [%x[table], #0xa0]\n"
+ "ldr q27, [%x[table], #0xb0]\n"
+ "ldr q28, [%x[table], #0xc0]\n"
+ "ldr q29, [%x[table], #0xd0]\n"
+ "ldr q30, [%x[table], #0xe0]\n"
+ "ldr q31, [%x[table], #0xf0]\n"
+ "1:" // string loop
+ "ldr x21, [%x[input], x22, LSL #0x3]\n"
+ "ldr x20, [%x[output], x22, LSL #0x3]\n"
+ "movi v12.16b, #0x40\n"
+ "movi v11.16b, #0x80\n"
+ "movi v10.16b, #0xc0\n"
+ "mov x19, %x[string_length]\n"
+ "2:" // 4 rounds: width loop
+ "cmp x19, #0x30\n"
+ "bge 27f\n"
+ "tbz x19, #5, 10f\n"
+ "ld1 { v9.16b }, [x21], #0x10\n"
+ "ld1 { v13.16b }, [x21], #0x10\n"
+ "tbz x19, #3, 6f\n"
+ "ldr d14, [x21], #0x8\n"
+ "tbz x19, #2, 4f\n"
+ "ld1 { v14.s }[2], [x21], #0x4\n"
+ "tbz x19, #1, 3f\n"
+ "ld1 { v14.h }[6], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[14], [x21]\n"
+ "b 26f\n"
+ "3:" // 4 rounds: Partial load: partial_1_44
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[12], [x21]\n"
+ "b 26f\n"
+ "4:" // 4 rounds: Partial load: partial_2_40
+ "tbz x19, #1, 5f\n"
+ "ld1 { v14.h }[4], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[10], [x21]\n"
+ "b 26f\n"
+ "5:" // 4 rounds: Partial load: partial_1_40
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[8], [x21]\n"
+ "b 26f\n"
+ "6:" // 4 rounds: Partial load: partial_4_32
+ "tbz x19, #2, 8f\n"
+ "ldr s14, [x21], #0x4\n"
+ "tbz x19, #1, 7f\n"
+ "ld1 { v14.h }[2], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[6], [x21]\n"
+ "b 26f\n"
+ "7:" // 4 rounds: Partial load: partial_1_36
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[4], [x21]\n"
+ "b 26f\n"
+ "8:" // 4 rounds: Partial load: partial_2_32
+ "tbz x19, #1, 9f\n"
+ "ldr h14, [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[2], [x21]\n"
+ "b 26f\n"
+ "9:" // 4 rounds: Partial load: partial_1_32
+ "tbz x19, #0, 26f\n"
+ "ldr b14, [x21, #0x0]\n"
+ "b 26f\n"
+ "10:" // 4 rounds: Partial load: partial_16_0
+ "tbz x19, #4, 18f\n"
+ "ld1 { v9.16b }, [x21], #0x10\n"
+ "tbz x19, #3, 14f\n"
+ "ldr d13, [x21], #0x8\n"
+ "tbz x19, #2, 12f\n"
+ "ld1 { v13.s }[2], [x21], #0x4\n"
+ "tbz x19, #1, 11f\n"
+ "ld1 { v13.h }[6], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[14], [x21]\n"
+ "b 26f\n"
+ "11:" // 4 rounds: Partial load: partial_1_28
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[12], [x21]\n"
+ "b 26f\n"
+ "12:" // 4 rounds: Partial load: partial_2_24
+ "tbz x19, #1, 13f\n"
+ "ld1 { v13.h }[4], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[10], [x21]\n"
+ "b 26f\n"
+ "13:" // 4 rounds: Partial load: partial_1_24
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[8], [x21]\n"
+ "b 26f\n"
+ "14:" // 4 rounds: Partial load: partial_4_16
+ "tbz x19, #2, 16f\n"
+ "ldr s13, [x21], #0x4\n"
+ "tbz x19, #1, 15f\n"
+ "ld1 { v13.h }[2], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[6], [x21]\n"
+ "b 26f\n"
+ "15:" // 4 rounds: Partial load: partial_1_20
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[4], [x21]\n"
+ "b 26f\n"
+ "16:" // 4 rounds: Partial load: partial_2_16
+ "tbz x19, #1, 17f\n"
+ "ldr h13, [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[2], [x21]\n"
+ "b 26f\n"
+ "17:" // 4 rounds: Partial load: partial_1_16
+ "tbz x19, #0, 26f\n"
+ "ldr b13, [x21, #0x0]\n"
+ "b 26f\n"
+ "18:" // 4 rounds: Partial load: partial_8_0
+ "tbz x19, #3, 22f\n"
+ "ldr d9, [x21], #0x8\n"
+ "tbz x19, #2, 20f\n"
+ "ld1 { v9.s }[2], [x21], #0x4\n"
+ "tbz x19, #1, 19f\n"
+ "ld1 { v9.h }[6], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[14], [x21]\n"
+ "b 26f\n"
+ "19:" // 4 rounds: Partial load: partial_1_12
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[12], [x21]\n"
+ "b 26f\n"
+ "20:" // 4 rounds: Partial load: partial_2_8
+ "tbz x19, #1, 21f\n"
+ "ld1 { v9.h }[4], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[10], [x21]\n"
+ "b 26f\n"
+ "21:" // 4 rounds: Partial load: partial_1_8
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[8], [x21]\n"
+ "b 26f\n"
+ "22:" // 4 rounds: Partial load: partial_4_0
+ "tbz x19, #2, 24f\n"
+ "ldr s9, [x21], #0x4\n"
+ "tbz x19, #1, 23f\n"
+ "ld1 { v9.h }[2], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[6], [x21]\n"
+ "b 26f\n"
+ "23:" // 4 rounds: Partial load: partial_1_4
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[4], [x21]\n"
+ "b 26f\n"
+ "24:" // 4 rounds: Partial load: partial_2_0
+ "tbz x19, #1, 25f\n"
+ "ldr h9, [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[2], [x21]\n"
+ "b 26f\n"
+ "25:" // 4 rounds: Partial load: partial_1_0
+ "ldr b9, [x21, #0x0]\n"
+ "26:" // 4 rounds: Partial load: Done
+ "b 28f\n"
+ "27:" // 4 rounds: Full load
+ "ldr q9, [x21, #0x0]\n"
+ "ldr q13, [x21, #0x10]\n"
+ "ldr q14, [x21, #0x20]\n"
+ "add x21, x21, #0x30\n"
+ "28:" // 4 rounds: Load done
+ "sub v8.16b, v9.16b, v12.16b\n"
+ "sub v7.16b, v9.16b, v11.16b\n"
+ "tbl v8.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v8.16b\n"
+ "sub v6.16b, v9.16b, v10.16b\n"
+ "sub v5.16b, v13.16b, v12.16b\n"
+ "tbl v9.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v9.16b\n"
+ "sub v4.16b, v13.16b, v11.16b\n"
+ "sub v3.16b, v13.16b, v10.16b\n"
+ "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
+ "sub v2.16b, v14.16b, v12.16b\n"
+ "sub v1.16b, v14.16b, v11.16b\n"
+ "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
+ "sub v0.16b, v14.16b, v10.16b\n"
+ "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
+ "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
+ "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
+ "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
+ "orr v9.16b, v9.16b, v8.16b\n"
+ "tbl v14.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v14.16b\n"
+ "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
+ "orr v7.16b, v7.16b, v6.16b\n"
+ "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
+ "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
+ "orr v13.16b, v13.16b, v5.16b\n"
+ "orr v4.16b, v4.16b, v3.16b\n"
+ "orr v14.16b, v14.16b, v2.16b\n"
+ "cmp x19, #0x30\n"
+ "orr v1.16b, v1.16b, v0.16b\n"
+ "orr v9.16b, v9.16b, v7.16b\n"
+ "orr v13.16b, v13.16b, v4.16b\n"
+ "orr v14.16b, v14.16b, v1.16b\n"
+ "bge 53f\n"
+ "tbz x19, #5, 36f\n"
+ "st1 { v9.16b }, [x20], #0x10\n"
+ "st1 { v13.16b }, [x20], #0x10\n"
+ "tbz x19, #3, 32f\n"
+ "str d14, [x20], #0x8\n"
+ "tbz x19, #2, 30f\n"
+ "st1 { v14.s }[2], [x20], #0x4\n"
+ "tbz x19, #1, 29f\n"
+ "st1 { v14.h }[6], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[14], [x20]\n"
+ "b 52f\n"
+ "29:" // 4 rounds: Partial writeback: partial_1_44
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[12], [x20]\n"
+ "b 52f\n"
+ "30:" // 4 rounds: Partial writeback: partial_2_40
+ "tbz x19, #1, 31f\n"
+ "st1 { v14.h }[4], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[10], [x20]\n"
+ "b 52f\n"
+ "31:" // 4 rounds: Partial writeback: partial_1_40
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[8], [x20]\n"
+ "b 52f\n"
+ "32:" // 4 rounds: Partial writeback: partial_4_32
+ "tbz x19, #2, 34f\n"
+ "str s14, [x20], #0x4\n"
+ "tbz x19, #1, 33f\n"
+ "st1 { v14.h }[2], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[6], [x20]\n"
+ "b 52f\n"
+ "33:" // 4 rounds: Partial writeback: partial_1_36
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[4], [x20]\n"
+ "b 52f\n"
+ "34:" // 4 rounds: Partial writeback: partial_2_32
+ "tbz x19, #1, 35f\n"
+ "str h14, [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[2], [x20]\n"
+ "b 52f\n"
+ "35:" // 4 rounds: Partial writeback: partial_1_32
+ "tbz x19, #0, 52f\n"
+ "str b14, [x20, #0x0]\n"
+ "b 52f\n"
+ "36:" // 4 rounds: Partial writeback: partial_16_0
+ "tbz x19, #4, 44f\n"
+ "st1 { v9.16b }, [x20], #0x10\n"
+ "tbz x19, #3, 40f\n"
+ "str d13, [x20], #0x8\n"
+ "tbz x19, #2, 38f\n"
+ "st1 { v13.s }[2], [x20], #0x4\n"
+ "tbz x19, #1, 37f\n"
+ "st1 { v13.h }[6], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[14], [x20]\n"
+ "b 52f\n"
+ "37:" // 4 rounds: Partial writeback: partial_1_28
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[12], [x20]\n"
+ "b 52f\n"
+ "38:" // 4 rounds: Partial writeback: partial_2_24
+ "tbz x19, #1, 39f\n"
+ "st1 { v13.h }[4], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[10], [x20]\n"
+ "b 52f\n"
+ "39:" // 4 rounds: Partial writeback: partial_1_24
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[8], [x20]\n"
+ "b 52f\n"
+ "40:" // 4 rounds: Partial writeback: partial_4_16
+ "tbz x19, #2, 42f\n"
+ "str s13, [x20], #0x4\n"
+ "tbz x19, #1, 41f\n"
+ "st1 { v13.h }[2], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[6], [x20]\n"
+ "b 52f\n"
+ "41:" // 4 rounds: Partial writeback: partial_1_20
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[4], [x20]\n"
+ "b 52f\n"
+ "42:" // 4 rounds: Partial writeback: partial_2_16
+ "tbz x19, #1, 43f\n"
+ "str h13, [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[2], [x20]\n"
+ "b 52f\n"
+ "43:" // 4 rounds: Partial writeback: partial_1_16
+ "tbz x19, #0, 52f\n"
+ "str b13, [x20, #0x0]\n"
+ "b 52f\n"
+ "44:" // 4 rounds: Partial writeback: partial_8_0
+ "tbz x19, #3, 48f\n"
+ "str d9, [x20], #0x8\n"
+ "tbz x19, #2, 46f\n"
+ "st1 { v9.s }[2], [x20], #0x4\n"
+ "tbz x19, #1, 45f\n"
+ "st1 { v9.h }[6], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[14], [x20]\n"
+ "b 52f\n"
+ "45:" // 4 rounds: Partial writeback: partial_1_12
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[12], [x20]\n"
+ "b 52f\n"
+ "46:" // 4 rounds: Partial writeback: partial_2_8
+ "tbz x19, #1, 47f\n"
+ "st1 { v9.h }[4], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[10], [x20]\n"
+ "b 52f\n"
+ "47:" // 4 rounds: Partial writeback: partial_1_8
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[8], [x20]\n"
+ "b 52f\n"
+ "48:" // 4 rounds: Partial writeback: partial_4_0
+ "tbz x19, #2, 50f\n"
+ "str s9, [x20], #0x4\n"
+ "tbz x19, #1, 49f\n"
+ "st1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[6], [x20]\n"
+ "b 52f\n"
+ "49:" // 4 rounds: Partial writeback: partial_1_4
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[4], [x20]\n"
+ "b 52f\n"
+ "50:" // 4 rounds: Partial writeback: partial_2_0
+ "tbz x19, #1, 51f\n"
+ "str h9, [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[2], [x20]\n"
+ "b 52f\n"
+ "51:" // 4 rounds: Partial writeback: partial_1_0
+ "str b9, [x20, #0x0]\n"
+ "52:" // 4 rounds: Partial writeback: Done
+ "b 54f\n"
+ "53:" // 4 rounds: Full writeback
+ "str q9, [x20, #0x0]\n"
+ "str q13, [x20, #0x10]\n"
+ "str q14, [x20, #0x20]\n"
+ "add x20, x20, #0x30\n"
+ "54:" // 4 rounds: Writeback done
+ "subs x19, x19, #0x30\n"
+ "bgt 2b\n"
+ "add x22, x22, #0x1\n"
+ "cmp x22, %x[num_strings]\n"
+ "bne 1b\n"
+ :
+ : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22");
+}
+
+#endif // __aarch64__
+} // namespace
+
+void neon_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
+{
+ ARM_COMPUTE_ERROR_ON(!ActivationLayerInfo::is_lut_supported(act_info.activation(), src->info()->data_type()));
+#ifdef __aarch64__
+ const int window_step_x = src->info()->tensor_shape().x();
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
+ auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+ substitute_bytes_neon(act_info.lut().data(), 1u, window_step_x, &input_ptr, &output_ptr);
+ },
+ input, output);
+#else // #ifdef __aarch64__
+ ARM_COMPUTE_UNUSED(src);
+ ARM_COMPUTE_UNUSED(dst);
+ ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_UNUSED(window);
+ ARM_COMPUTE_ERROR("LUT Only supported in aarch64.");
+#endif // __aarch64__
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/activation/generic/neon/qasymm8.cpp b/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
index 5095ecf5bd..67d9e0a8ca 100644
--- a/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
@@ -37,411 +37,6 @@ namespace arm_compute
{
namespace cpu
{
-namespace
-{
-#ifdef __aarch64__
-
-void substitute_bytes_neon(
- const uint8_t *table,
- size_t num_strings,
- size_t string_length,
- const uint8_t *const *input,
- uint8_t *const *output)
-{
- __asm__ __volatile__(
- "ldr q16, [%x[table], #0x0]\n"
- "ldr q17, [%x[table], #0x10]\n"
- "mov x22, #0x0\n"
- "ldr q18, [%x[table], #0x20]\n"
- "ldr q19, [%x[table], #0x30]\n"
- "ldr q20, [%x[table], #0x40]\n"
- "ldr q21, [%x[table], #0x50]\n"
- "ldr q22, [%x[table], #0x60]\n"
- "ldr q23, [%x[table], #0x70]\n"
- "ldr q24, [%x[table], #0x80]\n"
- "ldr q25, [%x[table], #0x90]\n"
- "ldr q26, [%x[table], #0xa0]\n"
- "ldr q27, [%x[table], #0xb0]\n"
- "ldr q28, [%x[table], #0xc0]\n"
- "ldr q29, [%x[table], #0xd0]\n"
- "ldr q30, [%x[table], #0xe0]\n"
- "ldr q31, [%x[table], #0xf0]\n"
- "1:" // string loop
- "ldr x21, [%x[input], x22, LSL #0x3]\n"
- "ldr x20, [%x[output], x22, LSL #0x3]\n"
- "movi v12.16b, #0x40\n"
- "movi v11.16b, #0x80\n"
- "movi v10.16b, #0xc0\n"
- "mov x19, %x[string_length]\n"
- "2:" // 4 rounds: width loop
- "cmp x19, #0x30\n"
- "bge 27f\n"
- "tbz x19, #5, 10f\n"
- "ld1 { v9.16b }, [x21], #0x10\n"
- "ld1 { v13.16b }, [x21], #0x10\n"
- "tbz x19, #3, 6f\n"
- "ldr d14, [x21], #0x8\n"
- "tbz x19, #2, 4f\n"
- "ld1 { v14.s }[2], [x21], #0x4\n"
- "tbz x19, #1, 3f\n"
- "ld1 { v14.h }[6], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[14], [x21]\n"
- "b 26f\n"
- "3:" // 4 rounds: Partial load: partial_1_44
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[12], [x21]\n"
- "b 26f\n"
- "4:" // 4 rounds: Partial load: partial_2_40
- "tbz x19, #1, 5f\n"
- "ld1 { v14.h }[4], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[10], [x21]\n"
- "b 26f\n"
- "5:" // 4 rounds: Partial load: partial_1_40
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[8], [x21]\n"
- "b 26f\n"
- "6:" // 4 rounds: Partial load: partial_4_32
- "tbz x19, #2, 8f\n"
- "ldr s14, [x21], #0x4\n"
- "tbz x19, #1, 7f\n"
- "ld1 { v14.h }[2], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[6], [x21]\n"
- "b 26f\n"
- "7:" // 4 rounds: Partial load: partial_1_36
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[4], [x21]\n"
- "b 26f\n"
- "8:" // 4 rounds: Partial load: partial_2_32
- "tbz x19, #1, 9f\n"
- "ldr h14, [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v14.b }[2], [x21]\n"
- "b 26f\n"
- "9:" // 4 rounds: Partial load: partial_1_32
- "tbz x19, #0, 26f\n"
- "ldr b14, [x21, #0x0]\n"
- "b 26f\n"
- "10:" // 4 rounds: Partial load: partial_16_0
- "tbz x19, #4, 18f\n"
- "ld1 { v9.16b }, [x21], #0x10\n"
- "tbz x19, #3, 14f\n"
- "ldr d13, [x21], #0x8\n"
- "tbz x19, #2, 12f\n"
- "ld1 { v13.s }[2], [x21], #0x4\n"
- "tbz x19, #1, 11f\n"
- "ld1 { v13.h }[6], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[14], [x21]\n"
- "b 26f\n"
- "11:" // 4 rounds: Partial load: partial_1_28
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[12], [x21]\n"
- "b 26f\n"
- "12:" // 4 rounds: Partial load: partial_2_24
- "tbz x19, #1, 13f\n"
- "ld1 { v13.h }[4], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[10], [x21]\n"
- "b 26f\n"
- "13:" // 4 rounds: Partial load: partial_1_24
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[8], [x21]\n"
- "b 26f\n"
- "14:" // 4 rounds: Partial load: partial_4_16
- "tbz x19, #2, 16f\n"
- "ldr s13, [x21], #0x4\n"
- "tbz x19, #1, 15f\n"
- "ld1 { v13.h }[2], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[6], [x21]\n"
- "b 26f\n"
- "15:" // 4 rounds: Partial load: partial_1_20
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[4], [x21]\n"
- "b 26f\n"
- "16:" // 4 rounds: Partial load: partial_2_16
- "tbz x19, #1, 17f\n"
- "ldr h13, [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v13.b }[2], [x21]\n"
- "b 26f\n"
- "17:" // 4 rounds: Partial load: partial_1_16
- "tbz x19, #0, 26f\n"
- "ldr b13, [x21, #0x0]\n"
- "b 26f\n"
- "18:" // 4 rounds: Partial load: partial_8_0
- "tbz x19, #3, 22f\n"
- "ldr d9, [x21], #0x8\n"
- "tbz x19, #2, 20f\n"
- "ld1 { v9.s }[2], [x21], #0x4\n"
- "tbz x19, #1, 19f\n"
- "ld1 { v9.h }[6], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[14], [x21]\n"
- "b 26f\n"
- "19:" // 4 rounds: Partial load: partial_1_12
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[12], [x21]\n"
- "b 26f\n"
- "20:" // 4 rounds: Partial load: partial_2_8
- "tbz x19, #1, 21f\n"
- "ld1 { v9.h }[4], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[10], [x21]\n"
- "b 26f\n"
- "21:" // 4 rounds: Partial load: partial_1_8
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[8], [x21]\n"
- "b 26f\n"
- "22:" // 4 rounds: Partial load: partial_4_0
- "tbz x19, #2, 24f\n"
- "ldr s9, [x21], #0x4\n"
- "tbz x19, #1, 23f\n"
- "ld1 { v9.h }[2], [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[6], [x21]\n"
- "b 26f\n"
- "23:" // 4 rounds: Partial load: partial_1_4
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[4], [x21]\n"
- "b 26f\n"
- "24:" // 4 rounds: Partial load: partial_2_0
- "tbz x19, #1, 25f\n"
- "ldr h9, [x21], #0x2\n"
- "tbz x19, #0, 26f\n"
- "ld1 { v9.b }[2], [x21]\n"
- "b 26f\n"
- "25:" // 4 rounds: Partial load: partial_1_0
- "ldr b9, [x21, #0x0]\n"
- "26:" // 4 rounds: Partial load: Done
- "b 28f\n"
- "27:" // 4 rounds: Full load
- "ldr q9, [x21, #0x0]\n"
- "ldr q13, [x21, #0x10]\n"
- "ldr q14, [x21, #0x20]\n"
- "add x21, x21, #0x30\n"
- "28:" // 4 rounds: Load done
- "sub v8.16b, v9.16b, v12.16b\n"
- "sub v7.16b, v9.16b, v11.16b\n"
- "tbl v8.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v8.16b\n"
- "sub v6.16b, v9.16b, v10.16b\n"
- "sub v5.16b, v13.16b, v12.16b\n"
- "tbl v9.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v9.16b\n"
- "sub v4.16b, v13.16b, v11.16b\n"
- "sub v3.16b, v13.16b, v10.16b\n"
- "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
- "sub v2.16b, v14.16b, v12.16b\n"
- "sub v1.16b, v14.16b, v11.16b\n"
- "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
- "sub v0.16b, v14.16b, v10.16b\n"
- "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
- "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
- "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
- "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
- "orr v9.16b, v9.16b, v8.16b\n"
- "tbl v14.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v14.16b\n"
- "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
- "orr v7.16b, v7.16b, v6.16b\n"
- "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
- "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
- "orr v13.16b, v13.16b, v5.16b\n"
- "orr v4.16b, v4.16b, v3.16b\n"
- "orr v14.16b, v14.16b, v2.16b\n"
- "cmp x19, #0x30\n"
- "orr v1.16b, v1.16b, v0.16b\n"
- "orr v9.16b, v9.16b, v7.16b\n"
- "orr v13.16b, v13.16b, v4.16b\n"
- "orr v14.16b, v14.16b, v1.16b\n"
- "bge 53f\n"
- "tbz x19, #5, 36f\n"
- "st1 { v9.16b }, [x20], #0x10\n"
- "st1 { v13.16b }, [x20], #0x10\n"
- "tbz x19, #3, 32f\n"
- "str d14, [x20], #0x8\n"
- "tbz x19, #2, 30f\n"
- "st1 { v14.s }[2], [x20], #0x4\n"
- "tbz x19, #1, 29f\n"
- "st1 { v14.h }[6], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[14], [x20]\n"
- "b 52f\n"
- "29:" // 4 rounds: Partial writeback: partial_1_44
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[12], [x20]\n"
- "b 52f\n"
- "30:" // 4 rounds: Partial writeback: partial_2_40
- "tbz x19, #1, 31f\n"
- "st1 { v14.h }[4], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[10], [x20]\n"
- "b 52f\n"
- "31:" // 4 rounds: Partial writeback: partial_1_40
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[8], [x20]\n"
- "b 52f\n"
- "32:" // 4 rounds: Partial writeback: partial_4_32
- "tbz x19, #2, 34f\n"
- "str s14, [x20], #0x4\n"
- "tbz x19, #1, 33f\n"
- "st1 { v14.h }[2], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[6], [x20]\n"
- "b 52f\n"
- "33:" // 4 rounds: Partial writeback: partial_1_36
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[4], [x20]\n"
- "b 52f\n"
- "34:" // 4 rounds: Partial writeback: partial_2_32
- "tbz x19, #1, 35f\n"
- "str h14, [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v14.b }[2], [x20]\n"
- "b 52f\n"
- "35:" // 4 rounds: Partial writeback: partial_1_32
- "tbz x19, #0, 52f\n"
- "str b14, [x20, #0x0]\n"
- "b 52f\n"
- "36:" // 4 rounds: Partial writeback: partial_16_0
- "tbz x19, #4, 44f\n"
- "st1 { v9.16b }, [x20], #0x10\n"
- "tbz x19, #3, 40f\n"
- "str d13, [x20], #0x8\n"
- "tbz x19, #2, 38f\n"
- "st1 { v13.s }[2], [x20], #0x4\n"
- "tbz x19, #1, 37f\n"
- "st1 { v13.h }[6], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[14], [x20]\n"
- "b 52f\n"
- "37:" // 4 rounds: Partial writeback: partial_1_28
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[12], [x20]\n"
- "b 52f\n"
- "38:" // 4 rounds: Partial writeback: partial_2_24
- "tbz x19, #1, 39f\n"
- "st1 { v13.h }[4], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[10], [x20]\n"
- "b 52f\n"
- "39:" // 4 rounds: Partial writeback: partial_1_24
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[8], [x20]\n"
- "b 52f\n"
- "40:" // 4 rounds: Partial writeback: partial_4_16
- "tbz x19, #2, 42f\n"
- "str s13, [x20], #0x4\n"
- "tbz x19, #1, 41f\n"
- "st1 { v13.h }[2], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[6], [x20]\n"
- "b 52f\n"
- "41:" // 4 rounds: Partial writeback: partial_1_20
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[4], [x20]\n"
- "b 52f\n"
- "42:" // 4 rounds: Partial writeback: partial_2_16
- "tbz x19, #1, 43f\n"
- "str h13, [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v13.b }[2], [x20]\n"
- "b 52f\n"
- "43:" // 4 rounds: Partial writeback: partial_1_16
- "tbz x19, #0, 52f\n"
- "str b13, [x20, #0x0]\n"
- "b 52f\n"
- "44:" // 4 rounds: Partial writeback: partial_8_0
- "tbz x19, #3, 48f\n"
- "str d9, [x20], #0x8\n"
- "tbz x19, #2, 46f\n"
- "st1 { v9.s }[2], [x20], #0x4\n"
- "tbz x19, #1, 45f\n"
- "st1 { v9.h }[6], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[14], [x20]\n"
- "b 52f\n"
- "45:" // 4 rounds: Partial writeback: partial_1_12
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[12], [x20]\n"
- "b 52f\n"
- "46:" // 4 rounds: Partial writeback: partial_2_8
- "tbz x19, #1, 47f\n"
- "st1 { v9.h }[4], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[10], [x20]\n"
- "b 52f\n"
- "47:" // 4 rounds: Partial writeback: partial_1_8
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[8], [x20]\n"
- "b 52f\n"
- "48:" // 4 rounds: Partial writeback: partial_4_0
- "tbz x19, #2, 50f\n"
- "str s9, [x20], #0x4\n"
- "tbz x19, #1, 49f\n"
- "st1 { v9.h }[2], [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[6], [x20]\n"
- "b 52f\n"
- "49:" // 4 rounds: Partial writeback: partial_1_4
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[4], [x20]\n"
- "b 52f\n"
- "50:" // 4 rounds: Partial writeback: partial_2_0
- "tbz x19, #1, 51f\n"
- "str h9, [x20], #0x2\n"
- "tbz x19, #0, 52f\n"
- "st1 { v9.b }[2], [x20]\n"
- "b 52f\n"
- "51:" // 4 rounds: Partial writeback: partial_1_0
- "str b9, [x20, #0x0]\n"
- "52:" // 4 rounds: Partial writeback: Done
- "b 54f\n"
- "53:" // 4 rounds: Full writeback
- "str q9, [x20, #0x0]\n"
- "str q13, [x20, #0x10]\n"
- "str q14, [x20, #0x20]\n"
- "add x20, x20, #0x30\n"
- "54:" // 4 rounds: Writeback done
- "subs x19, x19, #0x30\n"
- "bgt 2b\n"
- "add x22, x22, #0x1\n"
- "cmp x22, %x[num_strings]\n"
- "bne 1b\n"
- :
- : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22");
-}
-
-#endif // __aarch64__
-} // namespace
-
-void neon_qasymm8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
-{
- ARM_COMPUTE_ERROR_ON(!ActivationLayerInfo::is_lut_supported(act_info.activation(), src->info()->data_type()));
-#ifdef __aarch64__
- const int window_step_x = src->info()->tensor_shape().x();
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
- Iterator input(src, win_collapsed);
- Iterator output(dst, win_collapsed);
- execute_window_loop(win_collapsed, [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr());
- auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
- substitute_bytes_neon(act_info.lut().data(), 1u, window_step_x, &input_ptr, &output_ptr);
- },
- input, output);
-#else // #ifdef __aarch64__
- ARM_COMPUTE_UNUSED(src);
- ARM_COMPUTE_UNUSED(dst);
- ARM_COMPUTE_UNUSED(act_info);
- ARM_COMPUTE_UNUSED(window);
- ARM_COMPUTE_ERROR("LUT Only supported in aarch64.");
-#endif // __aarch64__
-}
-
void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
constexpr int window_step_x = 16;
@@ -463,8 +58,8 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
const qasymm8_t b = quantize_qasymm8(act_info.b(), qi_in);
const qasymm8_t const_0 = quantize_qasymm8(0.f, qi_in);
const qasymm8x16_t vconst_0 = vdupq_n_u8(const_0);
- const auto vconst_1 = vdupq_n_f32(1.f);
#ifndef __aarch64__
+ const auto vconst_1 = vdupq_n_f32(1.f);
const auto vconst_0_f32 = vdupq_n_f32(0);
#endif // __aarch64__
const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
@@ -518,6 +113,7 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
// Re-quantize to new output space
tmp = vmlaq_qasymm8(tmp, vs, vo);
}
+#ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
{
// De-quantize
@@ -535,6 +131,7 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
// Re-quantize to new output space
tmp = vquantize(tmp_dep, qi_out);
}
+#endif // __aarch64__
else if(act == ActivationLayerInfo::ActivationFunction::TANH)
{
// De-quantize
@@ -624,12 +221,14 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
tmp = std::min(a, std::max(b, in));
tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
}
+#ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
{
float tmp_f = dequantize_qasymm8(in, qi_in);
tmp_f = 1.f / (1.f + std::exp(-tmp_f));
tmp = quantize_qasymm8(tmp_f, qi_out);
}
+#endif // __aarch64__
else if(act == ActivationLayerInfo::ActivationFunction::TANH)
{
float tmp_f = dequantize_qasymm8(in, qi_in);
diff --git a/src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp
index 4dca1ba794..d7c982e414 100644
--- a/src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/activation/generic/neon/qasymm8_signed.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,9 +56,9 @@ void neon_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
const qasymm8_signed_t b = quantize_qasymm8_signed(act_info.b(), qi_in);
const qasymm8_signed_t const_0 = quantize_qasymm8_signed(0.f, qi_in);
const qasymm8x16_signed_t vconst_0 = vdupq_n_s8(const_0);
- const auto vconst_1 = vdupq_n_f32(1.f);
#ifndef __aarch64__
- const auto vconst_0_f32 = vdupq_n_f32(1.f);
+ const auto vconst_1 = vdupq_n_f32(1.f);
+ const auto vconst_0_f32 = vdupq_n_f32(0.f);
#endif // __aarch64__
const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
const float32x4_t vb_f32 = vdupq_n_f32(act_info.b());
@@ -108,6 +108,7 @@ void neon_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
// Re-quantize to new output space
tmp = vmlaq_qasymm8_signed(tmp, vs, vo);
}
+#ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
{
// De-quantize
@@ -125,6 +126,7 @@ void neon_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
// Re-quantize to new output space
tmp = vquantize_signed(tmp_dep, qi_out);
}
+#endif // __aarch64__
else if(act == ActivationLayerInfo::ActivationFunction::TANH)
{
// De-quantize
@@ -224,12 +226,14 @@ void neon_qasymm8_signed_activation(const ITensor *src, ITensor *dst, const Acti
tmp = std::min(a, std::max(b, in));
tmp = utility::clamp<int32_t, qasymm8_signed_t>(tmp * s + o);
}
+#ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
{
float tmp_f = dequantize_qasymm8_signed(in, qi_in);
tmp_f = 1.f / (1.f + std::exp(-tmp_f));
tmp = quantize_qasymm8_signed(tmp_f, qi_out);
}
+#endif // __aarch64__
else if(act == ActivationLayerInfo::ActivationFunction::TANH)
{
float tmp_f = dequantize_qasymm8_signed(in, qi_in);
diff --git a/src/cpu/kernels/activation/list.h b/src/cpu/kernels/activation/list.h
index b2322a6477..3850d4de6b 100644
--- a/src/cpu/kernels/activation/list.h
+++ b/src/cpu/kernels/activation/list.h
@@ -31,8 +31,8 @@ namespace cpu
#define DECLARE_ACTIVATION_KERNEL(func_name) \
void func_name(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
+DECLARE_ACTIVATION_KERNEL(neon_q8_activation_lut);
DECLARE_ACTIVATION_KERNEL(neon_qasymm8_activation);
-DECLARE_ACTIVATION_KERNEL(neon_qasymm8_activation_lut);
DECLARE_ACTIVATION_KERNEL(sve2_qasymm8_activation);
DECLARE_ACTIVATION_KERNEL(neon_qasymm8_signed_activation);
DECLARE_ACTIVATION_KERNEL(sve2_qasymm8_signed_activation);