path: root/src/cpu/kernels/CpuActivationKernel.cpp
author     Viet-Hoa Do <viet-hoa.do@arm.com>   2022-06-21 15:56:15 +0100
committer  Viet-Hoa Do <viet-hoa.do@arm.com>   2022-06-29 13:30:06 +0000
commit     b042e39060901b44e615b923b5723c04d9b42a95 (patch)
tree       e23fd9b89c753f9731e1e8ec4a0d9ca468f9f683 /src/cpu/kernels/CpuActivationKernel.cpp
parent     13f96d0a5efc140785a6de58bff9b24b80dd0cfd (diff)
download   ComputeLibrary-b042e39060901b44e615b923b5723c04d9b42a95.tar.gz
Add LUT-based leaky relu for QASYMM8 on CPU
* Add LUT generation function for Leaky ReLU.
* Some additional changes in the existing LUT implementation:
  + Bring back the NEON implementation of hard swish for 32-bit build.
    Library size of 64-bit build is not affected.
  + Add some extra #ifdef to remove unnecessary code in 32-bit build.

Resolves: COMPMID-5386

Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I1ea49611cc922765ee741e31138c888401d33e9b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7845
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
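The LUT approach works because a QASYMM8 tensor can only hold 256 distinct byte values, so the whole activation can be precomputed once in configure() (the init_lut() call in the diff below) instead of being evaluated per element. The following is a minimal sketch of how such a table could be generated for leaky ReLU; UniformQuantInfo, make_leaky_relu_lut and the alpha parameter are illustrative names for this sketch, not the library's actual API.

// Sketch only: build a 256-entry table mapping every possible QASYMM8 input
// byte to its leaky-ReLU output byte, using the input/output quantization.
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>

struct UniformQuantInfo // hypothetical stand-in for the library's uniform quantization info
{
    float   scale;
    int32_t offset;
};

std::array<uint8_t, 256> make_leaky_relu_lut(float alpha, UniformQuantInfo in, UniformQuantInfo out)
{
    std::array<uint8_t, 256> lut{};
    for(int i = 0; i < 256; ++i)
    {
        const float x = in.scale * static_cast<float>(i - in.offset);              // dequantize
        const float y = (x > 0.f) ? x : alpha * x;                                 // leaky ReLU
        const int   q = static_cast<int>(std::lround(y / out.scale)) + out.offset; // requantize
        lut[i]        = static_cast<uint8_t>(std::min(std::max(q, 0), 255));       // clamp to uint8 range
    }
    return lut;
}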
Diffstat (limited to 'src/cpu/kernels/CpuActivationKernel.cpp')
-rw-r--r--  src/cpu/kernels/CpuActivationKernel.cpp  16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index 330de1ece2..9eaf44af51 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -45,11 +45,13 @@ namespace
{
static const std::vector<CpuActivationKernel::ActivationKernel> available_kernels =
{
- { // neon LUT implementantion of HARD_SWISH takes precedence
- "neon_qu8_activation_hardswish_lut",
- [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.f == ActivationLayerInfo::ActivationFunction::HARD_SWISH; },
- REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_hardswish_lut)
+#ifdef __aarch64__
+ { // Neon LUT implementantion takes precedence
+ "neon_qu8_activation_lut",
+ [](const ActivationDataTypeISASelectorData & data) { return ActivationLayerInfo::is_lut_supported(data.f, data.dt); },
+ REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation_lut)
},
+#endif // __aarch64__
{
"sve2_qu8_activation",
[](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.isa.sve2; },
@@ -87,7 +89,7 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel
},
{
"neon_qu8_activation",
- [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 && data.f != ActivationLayerInfo::ActivationFunction::HARD_SWISH; },
+ [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8; },
REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_activation)
},
{
@@ -188,10 +190,12 @@ void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Ac
_run_method = uk->ukernel;
_name = std::string("CpuActivationKernel").append("/").append(uk->name);
- if(activation_info.activation() == ActivationLayerInfo::ActivationFunction::HARD_SWISH && src->data_type() == DataType::QASYMM8)
+#ifdef __aarch64__
+ if(ActivationLayerInfo::is_lut_supported(activation_info.activation(), src->data_type()))
{
activation_info.init_lut(src->quantization_info().uniform(),(dst)?dst->quantization_info().uniform():src->quantization_info().uniform());
}
+#endif // __aarch64__
_act_info = activation_info;
// Configure kernel window
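At run time the LUT kernel then reduces to a per-byte gather through the precomputed table (the Neon variant can vectorize the lookup, e.g. with tbl instructions). A minimal scalar sketch, again with hypothetical names:

// Sketch only: apply a precomputed 256-entry activation table to a flat
// QASYMM8 buffer; every output byte is a direct lookup on the input byte.
#include <array>
#include <cstddef>
#include <cstdint>

void apply_lut_u8(const uint8_t *src, uint8_t *dst, std::size_t count, const std::array<uint8_t, 256> &lut)
{
    for(std::size_t i = 0; i < count; ++i)
    {
        dst[i] = lut[src[i]];
    }
}

Because the table already encodes the activation function and both quantization steps, the selector in the diff only needs to check ActivationLayerInfo::is_lut_supported(data.f, data.dt); the kernel body itself is the same regardless of which supported activation was requested.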