Diffstat (limited to 'src/core/CL/kernels/CLActivationLayerKernel.cpp')
 src/core/CL/kernels/CLActivationLayerKernel.cpp | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 34d1298d61..97a0ff6c6c 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -46,9 +46,9 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);
- static std::set<ActivationLayerInfo::ActivationFunction> qs8_supported_activations =
+ static std::set<ActivationLayerInfo::ActivationFunction> quantized_supported_activations =
{
ActivationLayerInfo::ActivationFunction::RELU,
ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
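Note: this hunk widens validation to admit QSYMM16 inputs and renames the whitelist accordingly, since the same activations are allowed for every quantized type. A minimal standalone sketch of the check, with the full set inferred from the error message in the next hunk (the enum is a stand-in for ActivationLayerInfo::ActivationFunction, not library code):

    #include <set>

    // Stand-in for ActivationLayerInfo::ActivationFunction.
    enum class ActFn { RELU, LU_BOUNDED_RELU, BOUNDED_RELU, LOGISTIC, TANH };

    // True when a quantized tensor may use this activation function.
    bool quantized_activation_supported(ActFn f)
    {
        static const std::set<ActFn> supported = {
            ActFn::RELU, ActFn::LU_BOUNDED_RELU, ActFn::BOUNDED_RELU,
            ActFn::LOGISTIC, ActFn::TANH
        };
        return supported.count(f) != 0;
    }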
@@ -60,11 +60,15 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
const QuantizationInfo &oq_info = (output != nullptr) ? output->quantization_info() : input->quantization_info();
const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized_asymmetric(data_type) && (qs8_supported_activations.count(f_act) == 0),
- "For QASYMM8 only tanh, logistic, relu and lower/upper bounded relu are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(data_type) && (quantized_supported_activations.count(f_act) == 0),
+ "For Quantized data type only tanh, logistic, relu and lower/upper bounded relu are supported");
+
ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 128.f, 128)));
ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 256.f, 0)));
+ ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
+ ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
+
// Checks performed when output is configured
if((output != nullptr) && (output->total_size() != 0))
{
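Note: the fixed output QuantizationInfo values follow from each activation's codomain: logistic maps into [0, 1) and tanh into (-1, 1), so scale 1/256 with offset 0, scale 1/128 with offset 128, and scale 1/32768 with offset 0 cover those ranges exactly. A worked example, assuming round-to-nearest and saturation (hypothetical helper code, not the library's):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float t = std::tanh(0.5f); // ~0.4621, inside (-1, 1)
        // QASYMM8 tanh output: scale 1/128, offset 128 -> [0, 255]
        const int q8  = std::clamp(static_cast<int>(std::lround(t * 128.f)) + 128, 0, 255);
        // QSYMM16 tanh output: scale 1/32768, offset 0 -> [-32768, 32767]
        const int q16 = std::clamp(static_cast<int>(std::lround(t * 32768.f)), -32768, 32767);
        std::printf("tanh(0.5f) = %f -> QASYMM8 %d, QSYMM16 %d\n", t, q8, q16);
        return 0;
    }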
@@ -135,16 +139,22 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
int b_const_int = 0;
const ActivationLayerInfo::ActivationFunction f_act = act_info.activation();
- const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(dt);
+ const bool is_quantized = is_data_type_quantized(dt);
const bool perform_activation_in_float = (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) || (f_act == ActivationLayerInfo::ActivationFunction::TANH);
// Create quantized version of constants a, b if needed
- if(is_quantized_asymmetric)
+ if(dt == DataType::QASYMM8)
{
const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
a_const_int = quantize_qasymm8(a_const, iq_info);
b_const_int = quantize_qasymm8(b_const, iq_info);
}
+ else if(dt == DataType::QSYMM16)
+ {
+ const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+ a_const_int = quantize_qsymm16(a_const, iq_info);
+ b_const_int = quantize_qsymm16(b_const, iq_info);
+ }
// Set build options
CLBuildOptions build_opts;
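Note: the a and b constants are now quantized with the helper that matches the exact data type instead of a blanket asymmetric check. A minimal sketch of the expected semantics of the two helpers (round, add the offset only in the asymmetric case, then saturate); the library's exact rounding policy may differ:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // round(value / scale) + offset, saturated to [0, 255].
    uint8_t quantize_qasymm8_sketch(float value, float scale, int offset)
    {
        const int q = static_cast<int>(std::lround(value / scale)) + offset;
        return static_cast<uint8_t>(std::clamp(q, 0, 255));
    }

    // round(value / scale), saturated to [-32768, 32767]; symmetric types carry no offset.
    int16_t quantize_qsymm16_sketch(float value, float scale)
    {
        const int q = static_cast<int>(std::lround(value / scale));
        return static_cast<int16_t>(std::clamp(q, -32768, 32767));
    }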
@@ -155,7 +165,7 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
// Set A, B constants in build options
- if(is_quantized_asymmetric && !perform_activation_in_float)
+ if(is_quantized && !perform_activation_in_float)
{
build_opts.add_option(("-DA_VAL=" + support::cpp11::to_string(a_const_int)));
build_opts.add_option(("-DB_VAL=" + support::cpp11::to_string(b_const_int)));
@@ -167,14 +177,14 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
}
// Set quantization info build options
- if(is_quantized_asymmetric)
+ if(is_quantized)
{
const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
// Quantized value of 0 corresponds to the offset o1
- build_opts.add_option(("-DCONST_0=" + support::cpp11::to_string(iq_info.offset)));
+ build_opts.add_option(("-DCONST_0=" + (is_data_type_quantized_asymmetric(dt) ? support::cpp11::to_string(iq_info.offset) : "0")));
build_opts.add_option(("-DS1_VAL=" + float_to_string_with_full_precision(iq_info.scale)));
- build_opts.add_option(("-DO1_VAL=" + support::cpp11::to_string(iq_info.offset)));
+ build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO1_VAL=" + support::cpp11::to_string(iq_info.offset));
// Set scale and offset of the input and output if they have different quantization info
if(output != nullptr)
@@ -184,16 +194,16 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
if(iq_info != oq_info)
{
build_opts.add_option(("-DS2_VAL=" + float_to_string_with_full_precision(oq_info.scale)));
- build_opts.add_option(("-DO2_VAL=" + support::cpp11::to_string(oq_info.offset)));
+ build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO2_VAL=" + support::cpp11::to_string(oq_info.offset));
}
}
}
// Create kernel
std::string kernel_name = std::string("activation_layer");
- if(is_quantized_asymmetric)
+ if(is_quantized)
{
- kernel_name += perform_activation_in_float ? std::string("_qa8_f32") : std::string("_qa8");
+ kernel_name += perform_activation_in_float ? std::string("_quant_f32") : std::string("_quant");
}
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
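Note: putting the hunks together, a QSYMM16 logistic layer with scale 1/32768 (hypothetical) would select the float-fallback variant and emit only symmetric-friendly defines; no -DO1_VAL/-DO2_VAL appears because those offsets are asymmetric-only:

    kernel:  activation_layer_quant_f32   (logistic is performed in float)
    defines: -DCONST_0=0
             -DS1_VAL=0.000030517578125   (1.f / 32768.f at full precision)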