aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPablo Marquez Tello <pablo.tello@arm.com>2022-05-26 14:19:39 +0100
committerPablo Marquez Tello <pablo.tello@arm.com>2022-06-07 12:33:48 +0000
commitd75cd8ac5de1a785712e2e23c735a0167d64dfa3 (patch)
tree4e07c3cb0b159e14596c5e47d677de5877ec6efc
parentfe1b1f6d94c196f086122613277ff95062a7e834 (diff)
downloadComputeLibrary-d75cd8ac5de1a785712e2e23c735a0167d64dfa3.tar.gz
Compute Hard-Swish with a Lookup table for qasymm8.
* Resolves COMPMID-5211

Change-Id: I5914f971d733174dae67e6b4c589f45c75733cf7
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7654
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--arm_compute/core/QuantizationInfo.h12
-rw-r--r--arm_compute/core/Types.h74
-rw-r--r--src/cpu/kernels/CpuActivationKernel.cpp36
-rw-r--r--src/cpu/kernels/CpuActivationKernel.h6
-rw-r--r--src/cpu/kernels/CpuKernelSelectionTypes.h11
-rw-r--r--src/cpu/kernels/activation/generic/neon/qasymm8.cpp460
-rw-r--r--src/cpu/kernels/activation/list.h3
-rw-r--r--tests/validation/NEON/ActivationLayer.cpp2
8 files changed, 526 insertions, 78 deletions
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index b331f7d923..0bd0f21bc1 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -399,6 +399,16 @@ inline float dequantize_qsymm8(int8_t value, const UniformQuantizationInfo &qinf
return value * qinfo.scale;
}
+inline qasymm8_t qasymm8_hard_swish(qasymm8_t in,
+ const UniformQuantizationInfo &qi_in,
+ const UniformQuantizationInfo &qi_out)
+{
+ float tmp_f = dequantize_qasymm8(in, qi_in);
+ tmp_f = tmp_f * ((std::min(std::max((tmp_f + 3), 0.0f), 6.0f)) * 0.166666667f);
+ const qasymm8_t tmp = quantize_qasymm8(tmp_f, qi_out);
+ return tmp;
+}
+
/** Dequantize a value given a 8-bit symmetric quantization scheme
*
* @param[in] value Value to dequantize
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index b1a1eb6f44..4524976d6b 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -774,10 +774,10 @@ public:
private:
std::pair<unsigned int, unsigned int> _stride;
- unsigned int _pad_left;
- unsigned int _pad_top;
- unsigned int _pad_right;
- unsigned int _pad_bottom;
+ unsigned int _pad_left;
+ unsigned int _pad_top;
+ unsigned int _pad_right;
+ unsigned int _pad_bottom;
DimensionRoundingType _round_type;
};
@@ -919,14 +919,14 @@ public:
}
private:
- std::vector<float> _min_sizes;
- std::vector<float> _variances;
- float _offset;
- bool _flip;
- bool _clip;
- std::vector<float> _max_sizes;
- std::vector<float> _aspect_ratios;
- Coordinates2D _img_size;
+ std::vector<float> _min_sizes;
+ std::vector<float> _variances;
+ float _offset;
+ bool _flip;
+ bool _clip;
+ std::vector<float> _max_sizes;
+ std::vector<float> _aspect_ratios;
+ Coordinates2D _img_size;
std::array<float, 2> _steps;
};
@@ -1171,15 +1171,15 @@ public:
}
private:
- unsigned int _max_detections;
- unsigned int _max_classes_per_detection;
- float _nms_score_threshold;
- float _iou_threshold;
- unsigned int _num_classes;
+ unsigned int _max_detections;
+ unsigned int _max_classes_per_detection;
+ float _nms_score_threshold;
+ float _iou_threshold;
+ unsigned int _num_classes;
std::array<float, 4> _scales_values;
- bool _use_regular_nms;
- unsigned int _detection_per_class;
- bool _dequantize_scores;
+ bool _use_regular_nms;
+ unsigned int _detection_per_class;
+ bool _dequantize_scores;
};
/** Pooling Layer Information struct*/
@@ -1612,13 +1612,13 @@ public:
}
private:
- float _img_width;
- float _img_height;
- float _scale;
- bool _apply_scale;
- bool _correct_transform_coords;
+ float _img_width;
+ float _img_height;
+ float _scale;
+ bool _apply_scale;
+ bool _correct_transform_coords;
std::array<float, 4> _weights;
- float _bbox_xform_clip;
+ float _bbox_xform_clip;
};
/** Activation Layer Information class */
@@ -1644,6 +1644,9 @@ public:
HARD_SWISH /**< Hard-swish ( \f$ f(x) = (x * relu6(x+3))/6 \f$ ) */
};
+ /** Lookup table */
+ using LookupTable256 = std::array<qasymm8_t, 256>;
+
ActivationLayerInfo() = default;
/** Default Constructor
*
@@ -1677,11 +1680,30 @@ public:
return _enabled;
}
+ const LookupTable256 &lut() const
+ {
+ return _lut;
+ }
+
+ void init_lut(const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out)
+ {
+ qasymm8_hard_swish_populate_table(_lut, qi_in, qi_out);
+ }
+
private:
ActivationFunction _act = { ActivationLayerInfo::ActivationFunction::IDENTITY };
float _a = {};
float _b = {};
bool _enabled = { false };
+ LookupTable256 _lut = {};
+
+ inline void qasymm8_hard_swish_populate_table(LookupTable256 &lut, const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out)
+ {
+ for(size_t i = 0; i < lut.size(); ++i)
+ {
+ lut[i] = qasymm8_hard_swish(i, qi_in, qi_out);
+ }
+ }
};
/** Fully connected layer info */
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index 74148071ae..7d13817eab 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -47,52 +47,57 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel
{
{
"sve2_qu8_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve2; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve2; },
REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_activation)
},
{
"sve2_qs8_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve2; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED && data.isa.sve2; },
REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_activation)
},
{
"sve2_qs16_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16 && data.isa.sve2; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16 && data.isa.sve2; },
REGISTER_QSYMM16_SVE2(arm_compute::cpu::sve2_qsymm16_activation)
},
{
"sve_fp16_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.sve && data.isa.fp16; },
REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_activation)
},
{
"sve_fp32_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F32 && data.isa.sve; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F32 && data.isa.sve; },
REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_activation)
},
{
"neon_fp16_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F16 && data.isa.fp16; },
REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_activation)
},
{
"neon_fp32_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::F32; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_activation)
},
{
"neon_qu8_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.f != ActivationLayerInfo::ActivationFunction::HARD_SWISH; },
REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation)
},
{
+ "neon_qu8_activation_hardswish",
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.f == ActivationLayerInfo::ActivationFunction::HARD_SWISH; },
+ REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_hardswish_lut)
+ },
+ {
"neon_qs8_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8_SIGNED; },
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_signed_activation)
},
{
"neon_qs16_activation",
- [](const DataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QSYMM16; },
REGISTER_QSYMM16_NEON(arm_compute::cpu::neon_qsymm16_activation)
},
};
@@ -122,7 +127,7 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const
ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);
- const auto *uk = CpuActivationKernel::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
+ const auto *uk = CpuActivationKernel::get_implementation(ActivationDataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa(), activation_info.activation() });
ARM_COMPUTE_RETURN_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
@@ -176,14 +181,19 @@ void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Ac
ARM_COMPUTE_ERROR_ON_NULLPTR(src);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, activation_info));
- const auto uk = CpuActivationKernel::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
+ const auto uk = CpuActivationKernel::get_implementation(ActivationDataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa(), activation_info.activation() });
ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
- _act_info = activation_info;
_run_method = uk->ukernel;
_name = std::string("CpuActivationKernel").append("/").append(uk->name);
+ if(activation_info.activation() == ActivationLayerInfo::ActivationFunction::HARD_SWISH && src->data_type() == DataType::QASYMM8)
+ {
+ activation_info.init_lut(src->quantization_info().uniform(), dst->quantization_info().uniform());
+ }
+ _act_info = activation_info;
+
// Configure kernel window
auto win_config = validate_and_configure_window(src, dst);
ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
diff --git a/src/cpu/kernels/CpuActivationKernel.h b/src/cpu/kernels/CpuActivationKernel.h
index b0476303f0..d856a9357f 100644
--- a/src/cpu/kernels/CpuActivationKernel.h
+++ b/src/cpu/kernels/CpuActivationKernel.h
@@ -75,9 +75,9 @@ public:
struct ActivationKernel
{
- const char *name;
- const DataTypeISASelectorPtr is_selected;
- ActivationKernelPtr ukernel;
+ const char *name;
+ const ActivationDataTypeISASelectorDataPtr is_selected;
+ ActivationKernelPtr ukernel;
};
static const std::vector<ActivationKernel> &get_available_kernels();
diff --git a/src/cpu/kernels/CpuKernelSelectionTypes.h b/src/cpu/kernels/CpuKernelSelectionTypes.h
index afcf014ad2..12542e5064 100644
--- a/src/cpu/kernels/CpuKernelSelectionTypes.h
+++ b/src/cpu/kernels/CpuKernelSelectionTypes.h
@@ -75,6 +75,14 @@ struct DepthwiseConv2dNativeDataTypeISASelectorData
DataType source_dt;
const cpuinfo::CpuIsaInfo &isa;
};
+
+struct ActivationDataTypeISASelectorData
+{
+ DataType dt;
+ const cpuinfo::CpuIsaInfo &isa;
+ ActivationLayerInfo::ActivationFunction f;
+};
+
// Selector pointer types
using DataTypeISASelectorPtr = std::add_pointer<bool(const DataTypeISASelectorData &data)>::type;
using DataTypeDataLayoutSelectorPtr = std::add_pointer<bool(const DataTypeDataLayoutISASelectorData &data)>::type;
@@ -82,9 +90,10 @@ using PoolDataTypeISASelectorPtr = std::add_pointer<bool(const
using ElementwiseDataTypeISASelectorPtr = std::add_pointer<bool(const ElementwiseDataTypeISASelectorData &data)>::type;
using DepthwiseConv2dNativeDataTypeISASelectorPtr = std::add_pointer<bool(const DepthwiseConv2dNativeDataTypeISASelectorData &data)>::type;
using CastDataTypeISASelectorDataPtr = std::add_pointer<bool(const CastDataTypeISASelectorData &data)>::type;
+using ActivationDataTypeISASelectorDataPtr = std::add_pointer<bool(const ActivationDataTypeISASelectorData &data)>::type;
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
-#endif // ARM_COMPUTE_CPU_KERNEL_SELECTION_TYPES_H \ No newline at end of file
+#endif // ARM_COMPUTE_CPU_KERNEL_SELECTION_TYPES_H
diff --git a/src/cpu/kernels/activation/generic/neon/qasymm8.cpp b/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
index 62e329e691..f35d0d298f 100644
--- a/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,11 +31,434 @@
#include <arm_neon.h>
#include <cmath>
#include <cstddef>
+#include <cstdint>
namespace arm_compute
{
namespace cpu
{
+namespace
+{
+#ifdef __aarch64__
+
+void substitute_bytes_neon(
+ const uint8_t *table,
+ size_t num_strings,
+ size_t string_length,
+ const uint8_t *const *input,
+ uint8_t *const *output)
+{
+ __asm__ __volatile__(
+ "ldr q16, [%x[table], #0x0]\n"
+ "ldr q17, [%x[table], #0x10]\n"
+ "mov x22, #0x0\n"
+ "ldr q18, [%x[table], #0x20]\n"
+ "ldr q19, [%x[table], #0x30]\n"
+ "ldr q20, [%x[table], #0x40]\n"
+ "ldr q21, [%x[table], #0x50]\n"
+ "ldr q22, [%x[table], #0x60]\n"
+ "ldr q23, [%x[table], #0x70]\n"
+ "ldr q24, [%x[table], #0x80]\n"
+ "ldr q25, [%x[table], #0x90]\n"
+ "ldr q26, [%x[table], #0xa0]\n"
+ "ldr q27, [%x[table], #0xb0]\n"
+ "ldr q28, [%x[table], #0xc0]\n"
+ "ldr q29, [%x[table], #0xd0]\n"
+ "ldr q30, [%x[table], #0xe0]\n"
+ "ldr q31, [%x[table], #0xf0]\n"
+ "1:" // string loop
+ "ldr x21, [%x[input], x22, LSL #0x3]\n"
+ "ldr x20, [%x[output], x22, LSL #0x3]\n"
+ "movi v12.16b, #0x40\n"
+ "movi v11.16b, #0x80\n"
+ "movi v10.16b, #0xc0\n"
+ "mov x19, %x[string_length]\n"
+ "2:" // 4 rounds: width loop
+ "cmp x19, #0x30\n"
+ "bge 27f\n"
+ "tbz x19, #5, 10f\n"
+ "ld1 { v9.16b }, [x21], #0x10\n"
+ "ld1 { v13.16b }, [x21], #0x10\n"
+ "tbz x19, #3, 6f\n"
+ "ldr d14, [x21], #0x8\n"
+ "tbz x19, #2, 4f\n"
+ "ld1 { v14.s }[2], [x21], #0x4\n"
+ "tbz x19, #1, 3f\n"
+ "ld1 { v14.h }[6], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[14], [x21]\n"
+ "b 26f\n"
+ "3:" // 4 rounds: Partial load: partial_1_44
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[12], [x21]\n"
+ "b 26f\n"
+ "4:" // 4 rounds: Partial load: partial_2_40
+ "tbz x19, #1, 5f\n"
+ "ld1 { v14.h }[4], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[10], [x21]\n"
+ "b 26f\n"
+ "5:" // 4 rounds: Partial load: partial_1_40
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[8], [x21]\n"
+ "b 26f\n"
+ "6:" // 4 rounds: Partial load: partial_4_32
+ "tbz x19, #2, 8f\n"
+ "ldr s14, [x21], #0x4\n"
+ "tbz x19, #1, 7f\n"
+ "ld1 { v14.h }[2], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[6], [x21]\n"
+ "b 26f\n"
+ "7:" // 4 rounds: Partial load: partial_1_36
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[4], [x21]\n"
+ "b 26f\n"
+ "8:" // 4 rounds: Partial load: partial_2_32
+ "tbz x19, #1, 9f\n"
+ "ldr h14, [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v14.b }[2], [x21]\n"
+ "b 26f\n"
+ "9:" // 4 rounds: Partial load: partial_1_32
+ "tbz x19, #0, 26f\n"
+ "ldr b14, [x21, #0x0]\n"
+ "b 26f\n"
+ "10:" // 4 rounds: Partial load: partial_16_0
+ "tbz x19, #4, 18f\n"
+ "ld1 { v9.16b }, [x21], #0x10\n"
+ "tbz x19, #3, 14f\n"
+ "ldr d13, [x21], #0x8\n"
+ "tbz x19, #2, 12f\n"
+ "ld1 { v13.s }[2], [x21], #0x4\n"
+ "tbz x19, #1, 11f\n"
+ "ld1 { v13.h }[6], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[14], [x21]\n"
+ "b 26f\n"
+ "11:" // 4 rounds: Partial load: partial_1_28
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[12], [x21]\n"
+ "b 26f\n"
+ "12:" // 4 rounds: Partial load: partial_2_24
+ "tbz x19, #1, 13f\n"
+ "ld1 { v13.h }[4], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[10], [x21]\n"
+ "b 26f\n"
+ "13:" // 4 rounds: Partial load: partial_1_24
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[8], [x21]\n"
+ "b 26f\n"
+ "14:" // 4 rounds: Partial load: partial_4_16
+ "tbz x19, #2, 16f\n"
+ "ldr s13, [x21], #0x4\n"
+ "tbz x19, #1, 15f\n"
+ "ld1 { v13.h }[2], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[6], [x21]\n"
+ "b 26f\n"
+ "15:" // 4 rounds: Partial load: partial_1_20
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[4], [x21]\n"
+ "b 26f\n"
+ "16:" // 4 rounds: Partial load: partial_2_16
+ "tbz x19, #1, 17f\n"
+ "ldr h13, [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v13.b }[2], [x21]\n"
+ "b 26f\n"
+ "17:" // 4 rounds: Partial load: partial_1_16
+ "tbz x19, #0, 26f\n"
+ "ldr b13, [x21, #0x0]\n"
+ "b 26f\n"
+ "18:" // 4 rounds: Partial load: partial_8_0
+ "tbz x19, #3, 22f\n"
+ "ldr d9, [x21], #0x8\n"
+ "tbz x19, #2, 20f\n"
+ "ld1 { v9.s }[2], [x21], #0x4\n"
+ "tbz x19, #1, 19f\n"
+ "ld1 { v9.h }[6], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[14], [x21]\n"
+ "b 26f\n"
+ "19:" // 4 rounds: Partial load: partial_1_12
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[12], [x21]\n"
+ "b 26f\n"
+ "20:" // 4 rounds: Partial load: partial_2_8
+ "tbz x19, #1, 21f\n"
+ "ld1 { v9.h }[4], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[10], [x21]\n"
+ "b 26f\n"
+ "21:" // 4 rounds: Partial load: partial_1_8
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[8], [x21]\n"
+ "b 26f\n"
+ "22:" // 4 rounds: Partial load: partial_4_0
+ "tbz x19, #2, 24f\n"
+ "ldr s9, [x21], #0x4\n"
+ "tbz x19, #1, 23f\n"
+ "ld1 { v9.h }[2], [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[6], [x21]\n"
+ "b 26f\n"
+ "23:" // 4 rounds: Partial load: partial_1_4
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[4], [x21]\n"
+ "b 26f\n"
+ "24:" // 4 rounds: Partial load: partial_2_0
+ "tbz x19, #1, 25f\n"
+ "ldr h9, [x21], #0x2\n"
+ "tbz x19, #0, 26f\n"
+ "ld1 { v9.b }[2], [x21]\n"
+ "b 26f\n"
+ "25:" // 4 rounds: Partial load: partial_1_0
+ "ldr b9, [x21, #0x0]\n"
+ "26:" // 4 rounds: Partial load: Done
+ "b 28f\n"
+ "27:" // 4 rounds: Full load
+ "ldr q9, [x21, #0x0]\n"
+ "ldr q13, [x21, #0x10]\n"
+ "ldr q14, [x21, #0x20]\n"
+ "add x21, x21, #0x30\n"
+ "28:" // 4 rounds: Load done
+ "sub v8.16b, v9.16b, v12.16b\n"
+ "sub v7.16b, v9.16b, v11.16b\n"
+ "tbl v8.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v8.16b\n"
+ "sub v6.16b, v9.16b, v10.16b\n"
+ "sub v5.16b, v13.16b, v12.16b\n"
+ "tbl v9.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v9.16b\n"
+ "sub v4.16b, v13.16b, v11.16b\n"
+ "sub v3.16b, v13.16b, v10.16b\n"
+ "tbl v7.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v7.16b\n"
+ "sub v2.16b, v14.16b, v12.16b\n"
+ "sub v1.16b, v14.16b, v11.16b\n"
+ "tbl v6.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v6.16b\n"
+ "sub v0.16b, v14.16b, v10.16b\n"
+ "tbl v13.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v13.16b\n"
+ "tbl v5.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v5.16b\n"
+ "tbl v4.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v4.16b\n"
+ "tbl v3.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v3.16b\n"
+ "orr v9.16b, v9.16b, v8.16b\n"
+ "tbl v14.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v14.16b\n"
+ "tbl v2.16b, { v20.16b, v21.16b, v22.16b, v23.16b }, v2.16b\n"
+ "orr v7.16b, v7.16b, v6.16b\n"
+ "tbl v1.16b, { v24.16b, v25.16b, v26.16b, v27.16b }, v1.16b\n"
+ "tbl v0.16b, { v28.16b, v29.16b, v30.16b, v31.16b }, v0.16b\n"
+ "orr v13.16b, v13.16b, v5.16b\n"
+ "orr v4.16b, v4.16b, v3.16b\n"
+ "orr v14.16b, v14.16b, v2.16b\n"
+ "cmp x19, #0x30\n"
+ "orr v1.16b, v1.16b, v0.16b\n"
+ "orr v9.16b, v9.16b, v7.16b\n"
+ "orr v13.16b, v13.16b, v4.16b\n"
+ "orr v14.16b, v14.16b, v1.16b\n"
+ "bge 53f\n"
+ "tbz x19, #5, 36f\n"
+ "st1 { v9.16b }, [x20], #0x10\n"
+ "st1 { v13.16b }, [x20], #0x10\n"
+ "tbz x19, #3, 32f\n"
+ "str d14, [x20], #0x8\n"
+ "tbz x19, #2, 30f\n"
+ "st1 { v14.s }[2], [x20], #0x4\n"
+ "tbz x19, #1, 29f\n"
+ "st1 { v14.h }[6], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[14], [x20]\n"
+ "b 52f\n"
+ "29:" // 4 rounds: Partial writeback: partial_1_44
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[12], [x20]\n"
+ "b 52f\n"
+ "30:" // 4 rounds: Partial writeback: partial_2_40
+ "tbz x19, #1, 31f\n"
+ "st1 { v14.h }[4], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[10], [x20]\n"
+ "b 52f\n"
+ "31:" // 4 rounds: Partial writeback: partial_1_40
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[8], [x20]\n"
+ "b 52f\n"
+ "32:" // 4 rounds: Partial writeback: partial_4_32
+ "tbz x19, #2, 34f\n"
+ "str s14, [x20], #0x4\n"
+ "tbz x19, #1, 33f\n"
+ "st1 { v14.h }[2], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[6], [x20]\n"
+ "b 52f\n"
+ "33:" // 4 rounds: Partial writeback: partial_1_36
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[4], [x20]\n"
+ "b 52f\n"
+ "34:" // 4 rounds: Partial writeback: partial_2_32
+ "tbz x19, #1, 35f\n"
+ "str h14, [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v14.b }[2], [x20]\n"
+ "b 52f\n"
+ "35:" // 4 rounds: Partial writeback: partial_1_32
+ "tbz x19, #0, 52f\n"
+ "str b14, [x20, #0x0]\n"
+ "b 52f\n"
+ "36:" // 4 rounds: Partial writeback: partial_16_0
+ "tbz x19, #4, 44f\n"
+ "st1 { v9.16b }, [x20], #0x10\n"
+ "tbz x19, #3, 40f\n"
+ "str d13, [x20], #0x8\n"
+ "tbz x19, #2, 38f\n"
+ "st1 { v13.s }[2], [x20], #0x4\n"
+ "tbz x19, #1, 37f\n"
+ "st1 { v13.h }[6], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[14], [x20]\n"
+ "b 52f\n"
+ "37:" // 4 rounds: Partial writeback: partial_1_28
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[12], [x20]\n"
+ "b 52f\n"
+ "38:" // 4 rounds: Partial writeback: partial_2_24
+ "tbz x19, #1, 39f\n"
+ "st1 { v13.h }[4], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[10], [x20]\n"
+ "b 52f\n"
+ "39:" // 4 rounds: Partial writeback: partial_1_24
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[8], [x20]\n"
+ "b 52f\n"
+ "40:" // 4 rounds: Partial writeback: partial_4_16
+ "tbz x19, #2, 42f\n"
+ "str s13, [x20], #0x4\n"
+ "tbz x19, #1, 41f\n"
+ "st1 { v13.h }[2], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[6], [x20]\n"
+ "b 52f\n"
+ "41:" // 4 rounds: Partial writeback: partial_1_20
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[4], [x20]\n"
+ "b 52f\n"
+ "42:" // 4 rounds: Partial writeback: partial_2_16
+ "tbz x19, #1, 43f\n"
+ "str h13, [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v13.b }[2], [x20]\n"
+ "b 52f\n"
+ "43:" // 4 rounds: Partial writeback: partial_1_16
+ "tbz x19, #0, 52f\n"
+ "str b13, [x20, #0x0]\n"
+ "b 52f\n"
+ "44:" // 4 rounds: Partial writeback: partial_8_0
+ "tbz x19, #3, 48f\n"
+ "str d9, [x20], #0x8\n"
+ "tbz x19, #2, 46f\n"
+ "st1 { v9.s }[2], [x20], #0x4\n"
+ "tbz x19, #1, 45f\n"
+ "st1 { v9.h }[6], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[14], [x20]\n"
+ "b 52f\n"
+ "45:" // 4 rounds: Partial writeback: partial_1_12
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[12], [x20]\n"
+ "b 52f\n"
+ "46:" // 4 rounds: Partial writeback: partial_2_8
+ "tbz x19, #1, 47f\n"
+ "st1 { v9.h }[4], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[10], [x20]\n"
+ "b 52f\n"
+ "47:" // 4 rounds: Partial writeback: partial_1_8
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[8], [x20]\n"
+ "b 52f\n"
+ "48:" // 4 rounds: Partial writeback: partial_4_0
+ "tbz x19, #2, 50f\n"
+ "str s9, [x20], #0x4\n"
+ "tbz x19, #1, 49f\n"
+ "st1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[6], [x20]\n"
+ "b 52f\n"
+ "49:" // 4 rounds: Partial writeback: partial_1_4
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[4], [x20]\n"
+ "b 52f\n"
+ "50:" // 4 rounds: Partial writeback: partial_2_0
+ "tbz x19, #1, 51f\n"
+ "str h9, [x20], #0x2\n"
+ "tbz x19, #0, 52f\n"
+ "st1 { v9.b }[2], [x20]\n"
+ "b 52f\n"
+ "51:" // 4 rounds: Partial writeback: partial_1_0
+ "str b9, [x20, #0x0]\n"
+ "52:" // 4 rounds: Partial writeback: Done
+ "b 54f\n"
+ "53:" // 4 rounds: Full writeback
+ "str q9, [x20, #0x0]\n"
+ "str q13, [x20, #0x10]\n"
+ "str q14, [x20, #0x20]\n"
+ "add x20, x20, #0x30\n"
+ "54:" // 4 rounds: Writeback done
+ "subs x19, x19, #0x30\n"
+ "bgt 2b\n"
+ "add x22, x22, #0x1\n"
+ "cmp x22, %x[num_strings]\n"
+ "bne 1b\n"
+ :
+ : [input] "r"(input), [num_strings] "r"(num_strings), [output] "r"(output), [string_length] "r"(string_length), [table] "r"(table)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x19", "x20", "x21", "x22");
+}
+
+#endif // __aarch64__
+} // namespace
+
+void neon_qasymm8_hardswish_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
+{
+ ARM_COMPUTE_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationFunction::HARD_SWISH);
+#ifdef __aarch64__
+ constexpr int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+
+ execute_window_loop(win_collapsed, [&](const Coordinates &)
+ {
+ // Compute S elements per iteration
+ int x = window_start_x;
+ for(; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr() + x);
+ auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr() + x);
+ substitute_bytes_neon(act_info.lut().data(), 1u, window_step_x, &input_ptr, &output_ptr);
+ }
+ // Compute left-over elements
+ for(; x < window_end_x; ++x)
+ {
+ const auto input_ptr = reinterpret_cast<const uint8_t *>(input.ptr() + x);
+ auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr() + x);
+ substitute_bytes_neon(act_info.lut().data(), 1u, 1u, &input_ptr, &output_ptr);
+ }
+ },
+ input, output);
+#else // #ifdef __aarch64__
+ ARM_COMPUTE_UNUSED(src);
+ ARM_COMPUTE_UNUSED(dst);
+ ARM_COMPUTE_UNUSED(act_info);
+ ARM_COMPUTE_UNUSED(window);
+ ARM_COMPUTE_ERROR("LUT Only supported in aarch64.");
+#endif // __aarch64__
+}
+
void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
constexpr int window_step_x = 16;
@@ -61,14 +484,10 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
#ifndef __aarch64__
const auto vconst_0_f32 = vdupq_n_f32(0);
#endif // __aarch64__
- const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
- const float32x4_t vb_f32 = vdupq_n_f32(act_info.b());
- const float a_f32 = act_info.a();
- const float b_f32 = act_info.b();
- const auto const_6_f32 = vdupq_n_f32(6.f);
- const auto const_0_f32 = vdupq_n_f32(0.f);
- const auto const_3_f32 = vdupq_n_f32(3.f);
- const auto const_inv_6_f32 = vdupq_n_f32(0.166666667f);
+ const float32x4_t va_f32 = vdupq_n_f32(act_info.a());
+ const float32x4_t vb_f32 = vdupq_n_f32(act_info.b());
+ const float a_f32 = act_info.a();
+ const float b_f32 = act_info.b();
// Initialise scale/offset for re-quantization
float s = qi_in.scale / qi_out.scale;
@@ -143,23 +562,6 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
// Re-quantize to new output space
tmp = vquantize(tmp_dep, qi_out);
}
- else if(act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
- {
- // De-quantize
- const auto vin_deq = vdequantize(vin, qi_in);
- // Perform activation
- const float32x4x4_t tmp_dep =
- {
- {
- wrapper::vmul(vin_deq.val[0], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[0], const_3_f32))))),
- wrapper::vmul(vin_deq.val[1], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[1], const_3_f32))))),
- wrapper::vmul(vin_deq.val[2], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[2], const_3_f32))))),
- wrapper::vmul(vin_deq.val[3], wrapper::vmul(const_inv_6_f32, wrapper::vmin(const_6_f32, wrapper::vmax(const_0_f32, wrapper::vadd(vin_deq.val[3], const_3_f32))))),
- }
- };
- // Re-quantize to new output space
- tmp = vquantize(tmp_dep, qi_out);
- }
else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
{
const auto vin_deq = vdequantize(vin, qi_in);
@@ -237,12 +639,6 @@ void neon_qasymm8_activation(const ITensor *src, ITensor *dst, const ActivationL
tmp_f = a_f32 * std::tanh(b_f32 * tmp_f);
tmp = quantize_qasymm8(tmp_f, qi_out);
}
- else if(act == ActivationLayerInfo::ActivationFunction::HARD_SWISH)
- {
- float tmp_f = dequantize_qasymm8(in, qi_in);
- tmp_f = tmp_f * ((std::min(std::max((tmp_f + 3), 0.0f), 6.0f)) * 0.166666667f);
- tmp = quantize_qasymm8(tmp_f, qi_out);
- }
else if(act == ActivationLayerInfo::ActivationFunction::LEAKY_RELU)
{
float tmp_f = dequantize_qasymm8(in, qi_in);
diff --git a/src/cpu/kernels/activation/list.h b/src/cpu/kernels/activation/list.h
index bf9aa0f373..7220d6cce1 100644
--- a/src/cpu/kernels/activation/list.h
+++ b/src/cpu/kernels/activation/list.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2021 Arm Limited.
+ * Copyright (c) 2020-2022 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,6 +32,7 @@ namespace cpu
void func_name(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
DECLARE_ACTIVATION_KERNEL(neon_qasymm8_activation);
+DECLARE_ACTIVATION_KERNEL(neon_qasymm8_hardswish_lut);
DECLARE_ACTIVATION_KERNEL(sve2_qasymm8_activation);
DECLARE_ACTIVATION_KERNEL(neon_qasymm8_signed_activation);
DECLARE_ACTIVATION_KERNEL(sve2_qasymm8_signed_activation);
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 1f43de49d2..e45b7fa5ad 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -309,7 +309,7 @@ DATA_TEST_CASE(KernelSelection, framework::DatasetMode::ALL, concat(concat(
cpu_isa.sve2 = (cpu_ext == "SVE2");
cpu_isa.fp16 = (data_type == DataType::F16);
- const auto *selected_impl = CpuActivationKernel::get_implementation(DataTypeISASelectorData{data_type, cpu_isa}, cpu::KernelSelectionType::Preferred);
+ const auto *selected_impl = CpuActivationKernel::get_implementation(ActivationDataTypeISASelectorData{data_type, cpu_isa,ActivationLayerInfo::ActivationFunction::BOUNDED_RELU}, cpu::KernelSelectionType::Preferred);
ARM_COMPUTE_ERROR_ON_NULLPTR(selected_impl);