diff options
author | Pablo Marquez Tello <pablo.tello@arm.com> | 2023-06-09 11:22:29 +0100 |
---|---|---|
committer | Pablo Marquez Tello <pablo.tello@arm.com> | 2023-06-12 14:38:56 +0000 |
commit | 48cfd5f7895f13167e4e9cd974dbc1e983e04ed7 (patch) | |
tree | c0fd7affd81be24dffd2d506a3940879f299aff4 /src/cpu/kernels | |
parent | 78a17a1a4f81b2ba8462941c23b8914380257579 (diff) | |
download | ComputeLibrary-48cfd5f7895f13167e4e9cd974dbc1e983e04ed7.tar.gz |
Refactor activation LUT computation
* Moving the code out of Types.h will help
reduce compilation time.
* Added LUT support for all other activation functions.
* Resolves COMPMID-6292
Change-Id: I1b5f0b21f03237447163276b8796b2aeb3fdd45c
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9749
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/cpu/kernels')
-rw-r--r-- | src/cpu/kernels/CpuActivationKernel.cpp | 77 | ||||
-rw-r--r-- | src/cpu/kernels/activation/generic/neon/lut.cpp | 2 | ||||
-rw-r--r-- | src/cpu/kernels/activation/generic/sve2/lut.cpp | 2 |
3 files changed, 75 insertions, 6 deletions
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp index 20a8489cdd..f4bd4e6cad 100644 --- a/src/cpu/kernels/CpuActivationKernel.cpp +++ b/src/cpu/kernels/CpuActivationKernel.cpp @@ -48,7 +48,7 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel #ifdef ARM_COMPUTE_ENABLE_SVE { "sve2_q8_activation_lut", - [](const ActivationDataTypeISASelectorData & data) { return ActivationLayerInfo::is_lut_supported(data.f, data.dt) && data.cpumodel == CPUModel::A510 && data.isa.sve2; }, + [](const ActivationDataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8 || data.dt == DataType::QASYMM8_SIGNED) && data.cpumodel == CPUModel::A510 && data.isa.sve2; }, REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_q8_activation_lut) }, #endif // ARM_COMPUTE_ENABLE_SVE @@ -56,7 +56,7 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel { // Neon LUT implementantion takes precedence "neon_q8_activation_lut", - [](const ActivationDataTypeISASelectorData & data) { return ActivationLayerInfo::is_lut_supported(data.f, data.dt); }, + [](const ActivationDataTypeISASelectorData & data) { return data.dt == DataType::QASYMM8 || data.dt == DataType::QASYMM8_SIGNED; }, REGISTER_Q8_NEON(arm_compute::cpu::neon_q8_activation_lut) }, #endif // __aarch64__ @@ -184,6 +184,72 @@ std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *src, return std::make_pair(Status{}, win); } +#ifdef __aarch64__ +void init_lut(ActivationLayerInfo::ActivationFunction act_func, DataType data_type, + const UniformQuantizationInfo &qi_in, const UniformQuantizationInfo &qi_out, + ActivationLayerInfo::LookupTable256 &lut, float a, float b) +{ + for(size_t i = 0; i < lut.size(); ++i) + { + float tmp_f = (data_type == DataType::QASYMM8) ? 
dequantize_qasymm8(i, qi_in) : dequantize_qasymm8_signed(i, qi_in); + switch(act_func) + { + case ActivationLayerInfo::ActivationFunction::HARD_SWISH: + tmp_f = tmp_f * ((std::min(std::max((tmp_f + 3), 0.0f), 6.0f)) * 0.166666667f); + break; + case ActivationLayerInfo::ActivationFunction::LEAKY_RELU: + tmp_f = tmp_f > 0 ? tmp_f : tmp_f * a; + break; + case ActivationLayerInfo::ActivationFunction::LOGISTIC: + tmp_f = 1.f / (1.f + std::exp(-tmp_f)); + break; + case ActivationLayerInfo::ActivationFunction::ABS: + tmp_f = std::abs(tmp_f); + break; + case ActivationLayerInfo::ActivationFunction::LINEAR: + tmp_f = a * tmp_f + b; + break; + case ActivationLayerInfo::ActivationFunction::RELU: + tmp_f = std::max<>(0.f, tmp_f); + break; + case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU: + tmp_f = std::min<>(a, std::max(0.f, tmp_f)); + break; + case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU: + tmp_f = std::min<>(a, std::max<>(b, tmp_f)); + break; + case ActivationLayerInfo::ActivationFunction::SOFT_RELU: + tmp_f = (tmp_f > 12.f) ? tmp_f : std::log(1.f + std::exp(tmp_f)); + break; + case ActivationLayerInfo::ActivationFunction::ELU: + tmp_f = (tmp_f >= 0) ? tmp_f : a * (std::exp(tmp_f) - 1); + break; + case ActivationLayerInfo::ActivationFunction::SQRT: + tmp_f = std::sqrt(tmp_f); + break; + case ActivationLayerInfo::ActivationFunction::SQUARE: + tmp_f = tmp_f * tmp_f; + break; + case ActivationLayerInfo::ActivationFunction::TANH: + tmp_f = a * std::tanh(b * tmp_f); + break; + case ActivationLayerInfo::ActivationFunction::IDENTITY: + break; + case ActivationLayerInfo::ActivationFunction::SWISH: + tmp_f = tmp_f / (1.f + std::exp(-a * tmp_f)); + break; + case ActivationLayerInfo::ActivationFunction::GELU: + tmp_f = tmp_f * (0.5f * (1.0f + erff(tmp_f / 1.41421356237f))); + break; + default: + ARM_COMPUTE_ERROR("Not supported"); + tmp_f = 0; + break; + } + lut[i] = (data_type == DataType::QASYMM8) ? 
quantize_qasymm8(tmp_f, qi_out) : quantize_qasymm8_signed(tmp_f, qi_out); + } +} +#endif // __aarch64__ } // namespace void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo activation_info) @@ -205,9 +271,12 @@ void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Ac _name = std::string("CpuActivationKernel").append("/").append(uk->name); #ifdef __aarch64__ - if(ActivationLayerInfo::is_lut_supported(activation_info.activation(), src->data_type())) + if(src->data_type() == DataType::QASYMM8 || src->data_type() == DataType::QASYMM8_SIGNED) { - activation_info.init_lut(src->data_type(), src->quantization_info().uniform(), (dst) ? dst->quantization_info().uniform() : src->quantization_info().uniform()); + ActivationLayerInfo::LookupTable256 tmp_lut; + init_lut(activation_info.activation(), src->data_type(), src->quantization_info().uniform(), (dst) ? dst->quantization_info().uniform() : src->quantization_info().uniform(), + tmp_lut, activation_info.a(), activation_info.b()); + activation_info.setLookupTable256(tmp_lut); } #endif // __aarch64__ _act_info = activation_info; diff --git a/src/cpu/kernels/activation/generic/neon/lut.cpp b/src/cpu/kernels/activation/generic/neon/lut.cpp index 90690ffcaa..b44347550e 100644 --- a/src/cpu/kernels/activation/generic/neon/lut.cpp +++ b/src/cpu/kernels/activation/generic/neon/lut.cpp @@ -32,7 +32,7 @@ namespace cpu #ifdef __aarch64__ void neon_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window) { - ARM_COMPUTE_ERROR_ON(!ActivationLayerInfo::is_lut_supported(act_info.activation(), src->info()->data_type())); + ARM_COMPUTE_ERROR_ON(src->info()->data_type() != DataType::QASYMM8 && src->info()->data_type() != DataType::QASYMM8_SIGNED); const auto window_end_x = window.x().end(); Window win_collapsed = window.collapse_if_possible(window, Window::DimZ); win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 
1)); diff --git a/src/cpu/kernels/activation/generic/sve2/lut.cpp b/src/cpu/kernels/activation/generic/sve2/lut.cpp index 2e5975744b..9dbeb305ff 100644 --- a/src/cpu/kernels/activation/generic/sve2/lut.cpp +++ b/src/cpu/kernels/activation/generic/sve2/lut.cpp @@ -32,7 +32,7 @@ namespace cpu #ifdef __aarch64__ void sve2_q8_activation_lut(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window) { - ARM_COMPUTE_ERROR_ON(!ActivationLayerInfo::is_lut_supported(act_info.activation(), src->info()->data_type())); + ARM_COMPUTE_ERROR_ON(src->info()->data_type() != DataType::QASYMM8 && src->info()->data_type() != DataType::QASYMM8_SIGNED); const auto window_end_x = window.x().end(); Window win_collapsed = window.collapse_if_possible(window, Window::DimZ); win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1)); |