From 4715cf9da26c4e914b9528f736e77d6773285169 Mon Sep 17 00:00:00 2001
From: Sang-Hoon Park
Date: Wed, 8 Jan 2020 16:02:47 +0000
Subject: COMPMID-2760: add support for QASYMM8_SIGNED to CLGEMMConvolutionLayer

Signed-off-by: Sang-Hoon Park
Change-Id: I55ab81d0f96c78af0396652cacf6640fc98ef3c2
Reviewed-on: https://review.mlplatform.org/c/2584
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

(limited to 'src/runtime/CL/functions')

diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index dbb68619db..682812b1c8 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -345,11 +345,7 @@ void CLGEMMConvolutionLayer::configure(const ICLTensor *input, const ICLTensor *
         {
             if(supported_acts.count(act_info.activation()) != 0)
             {
-                const int a_const_int = quantize_qasymm8(act_info.a(), output_quant_info);
-                const int b_const_int = quantize_qasymm8(act_info.b(), output_quant_info);
-
-                min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
-                max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
             }
             else
             {
@@ -402,7 +398,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(weights_info.are_reshaped(), "Weights already reshaped are not supported!");
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
 
     const bool is_quantized_per_channel = is_data_type_quantized_per_channel(weights->data_type());
     if(is_quantized_per_channel)
@@ -559,11 +555,7 @@ Status CLGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
         {
             if(supported_acts.count(act_info.activation()) != 0)
            {
-                const int a_const_int = quantize_qasymm8(act_info.a(), output_quant_info);
-                const int b_const_int = quantize_qasymm8(act_info.b(), output_quant_info);
-
-                min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
-                max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
+                std::tie(min_activation, max_activation) = get_quantized_activation_min_max(act_info, data_type, output_quant_info);
             }
             else
             {
--
cgit v1.2.1
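
For context on the change above: the removed code hard-coded 255 as the upper clamp, which is only correct for unsigned QASYMM8; the representable range of QASYMM8_SIGNED is [-128, 127]. The get_quantized_activation_min_max() helper that the patch switches to derives the bounds from the data type instead. Below is a minimal standalone sketch of that logic; the simplified types and the quantize() helper are illustrative assumptions, not the Arm Compute Library API.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <utility>

// Simplified stand-ins for the library types (illustrative assumptions only).
enum class DataType { QASYMM8, QASYMM8_SIGNED };
enum class Activation { RELU, BOUNDED_RELU, LU_BOUNDED_RELU };

struct UniformQuantizationInfo
{
    float   scale;
    int32_t offset;
};

// Quantize a real value onto the integer grid, clamped to the type's range.
static int32_t quantize(float value, DataType dt, const UniformQuantizationInfo &qi)
{
    const int32_t q_min = (dt == DataType::QASYMM8) ? 0 : -128;
    const int32_t q_max = (dt == DataType::QASYMM8) ? 255 : 127;
    const int32_t q     = static_cast<int32_t>(std::lround(value / qi.scale)) + qi.offset;
    return std::min(std::max(q, q_min), q_max);
}

// Data-type-aware activation bounds, mirroring what the replaced code computed
// by hand: min is the zero point (or quantized b for LU_BOUNDED_RELU), max is
// the type's maximum for plain RELU, otherwise quantized a.
static std::pair<int32_t, int32_t> quantized_activation_min_max(Activation act, float a, float b,
                                                                DataType dt, const UniformQuantizationInfo &qi)
{
    const int32_t type_max = (dt == DataType::QASYMM8) ? 255 : 127;
    const int32_t min_act  = (act == Activation::LU_BOUNDED_RELU) ? quantize(b, dt, qi) : qi.offset;
    const int32_t max_act  = (act == Activation::RELU) ? type_max : quantize(a, dt, qi);
    return { min_act, max_act };
}

int main()
{
    // With signed output, a plain RELU now clamps at 127 rather than an
    // out-of-range 255 -- the behavioral point of the patch.
    const UniformQuantizationInfo qi{ 0.05f, 10 };
    const auto bounds = quantized_activation_min_max(Activation::RELU, 6.0f, 0.0f,
                                                     DataType::QASYMM8_SIGNED, qi);
    std::printf("min=%d max=%d\n", bounds.first, bounds.second); // min=10 max=127
}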