author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-05-21 13:32:43 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-06-03 14:51:29 +0000
commit    4c5469b192665c94118a8a558787cb9cec2d0765 (patch)
tree      168aa969de8243bdbb1f25247dd9f54d037ae32c /src/runtime/NEON/functions
parent    43a129e94df41f9ac8bc78b702da5a387ada0494 (diff)
COMPMID-2225: Add interface support for new quantized data types.

Add support for:
 - QSYMM8, 8-bit quantized symmetric
 - QSYMM8_PER_CHANNEL, 8-bit quantized symmetric with per channel quantization

Change-Id: I00c4ff98e44af37419470af61419ee95d0de2463
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1236
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
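
The interface change behind this diff, sketched below: QuantizationInfo becomes a container that can also hold per-channel scales, and call sites that assume uniform (per-tensor) quantization now go through uniform(), which collapses it to a single (scale, offset) pair. This is an illustrative sketch inferred from the call sites in this patch, not the library's exact definition.

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct UniformQuantizationInfo
    {
        float   scale{ 0.f };
        int32_t offset{ 0 };
    };

    class QuantizationInfo
    {
    public:
        QuantizationInfo() = default;
        // Uniform (per-tensor) quantization, as used by QASYMM8
        QuantizationInfo(float scale, int32_t offset = 0)
            : _scale{ scale }, _offset{ offset }
        {
        }
        // Per-channel symmetric quantization (QSYMM8_PER_CHANNEL): one scale per channel, no offsets
        explicit QuantizationInfo(std::vector<float> scales)
            : _scale(std::move(scales))
        {
        }
        // Collapse to a single (scale, offset) pair; meaningful only for uniform info
        UniformQuantizationInfo uniform() const
        {
            return { _scale.empty() ? 0.f : _scale.front(),
                     _offset.empty() ? 0 : _offset.front() };
        }

    private:
        std::vector<float>   _scale{};
        std::vector<int32_t> _offset{};
    };
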
Diffstat (limited to 'src/runtime/NEON/functions')
-rw-r--r--  src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp                     | 22
-rw-r--r--  src/runtime/NEON/functions/NEFullyConnectedLayer.cpp                           | 16
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp                          | 44
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp                    |  8
-rw-r--r--  src/runtime/NEON/functions/NEPoolingLayer.cpp                                  |  4
-rw-r--r--  src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp |  6
6 files changed, 54 insertions(+), 46 deletions(-)
diff --git a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
index 3bb69b1ffc..4bc8439d93 100644
--- a/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthwiseConvolutionLayer.cpp
@@ -72,7 +72,7 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor
_memory_group.manage(&_accumulator);
_accumulator.allocator()->init(TensorInfo(accum_shape, 1, DataType::S32, output->info()->quantization_info()));
_accumulator.info()->set_data_layout(accum_layout);
- zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().offset));
+ zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().uniform().offset));
}
if(!_is_nchw)
@@ -109,13 +109,15 @@ void NEDepthwiseConvolutionLayer3x3::configure_generic(ITensor
// Configure biases accumulation
if(_is_quantized)
{
- const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input->info()->quantization_info() : output->info()->quantization_info();
+ const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = (output->info()->total_size() == 0) ? iq_info : output->info()->quantization_info().uniform();
- float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+ float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
int output_multiplier;
int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
- _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, output_quant_info.offset);
+ _output_stage_kernel.configure(&_accumulator, biases, _is_nchw ? output : &_permuted_output, output_multiplier, output_shift, oq_info.offset);
_accumulator.allocator()->allocate();
}
else if(_has_bias)
@@ -459,13 +461,15 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh
// Output staged configuration
if(_is_quantized)
{
- const QuantizationInfo output_quant_info = output->info()->quantization_info();
+ const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
- float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+ float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
int output_multiplier;
int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
- _output_stage_kernel.configure(&_output_reshaped, biases, output_to_use, output_multiplier, output_shift, output_quant_info.offset);
+ _output_stage_kernel.configure(&_output_reshaped, biases, output_to_use, output_multiplier, output_shift, oq_info.offset);
_output_reshaped.allocator()->allocate();
}
@@ -483,8 +487,8 @@ void NEDepthwiseConvolutionLayer::configure(ITensor *input, const ITensor *weigh
PixelValue zero_w(static_cast<int32_t>(0));
if(_is_quantized)
{
- zero_in = PixelValue(static_cast<int32_t>(input->info()->quantization_info().offset));
- zero_w = PixelValue(static_cast<int32_t>(weights->info()->quantization_info().offset));
+ zero_in = PixelValue(static_cast<int32_t>(input->info()->quantization_info().uniform().offset));
+ zero_w = PixelValue(static_cast<int32_t>(weights->info()->quantization_info().uniform().offset));
}
BorderSize border_size = _v2mm_kernel.border_size();
_v2mm_input_fill_border.configure(&_input_reshaped, border_size, BorderMode::CONSTANT, zero_in);
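
The recurring pattern in the quantized output stages above: the real requantization factor (iq_info.scale * wq_info.scale) / oq_info.scale is decomposed into an integer multiplier and a right shift so the output-stage kernel can stay in fixed-point. Below is a simplified sketch of what a helper like calculate_quantized_multiplier_less_than_one computes, assuming the usual gemmlowp Q0.31 convention; the library's actual helper has a different signature (it returns a Status) and may differ in edge-case handling.

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Decompose a real multiplier in (0, 1) as m ~= (M / 2^31) * 2^(-right_shift),
    // with M a 32-bit integer in [2^30, 2^31).
    void calculate_quantized_multiplier_less_than_one(float multiplier, int *quant_multiplier, int *right_shift)
    {
        assert(multiplier > 0.f && multiplier < 1.f);
        int          exponent = 0;
        const double q        = std::frexp(multiplier, &exponent); // multiplier = q * 2^exponent, q in [0.5, 1)
        int64_t      q_fixed  = static_cast<int64_t>(std::llround(q * (1ll << 31)));
        if(q_fixed == (1ll << 31)) // q rounded up to exactly 1.0
        {
            q_fixed /= 2;
            ++exponent;
        }
        *quant_multiplier = static_cast<int>(q_fixed);
        *right_shift      = -exponent; // exponent <= 0 since multiplier < 1
    }

The resulting pair is exactly what the configure() calls above pass on as output_multiplier / output_shift, alongside the output offset from oq_info.
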
diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index e1a17db6d4..7a74a7ea90 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -44,8 +44,8 @@ Status validate_mm(const ITensorInfo &input, const ITensorInfo &weights, const I
{
// Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
// Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info(input.quantization_info().scale, -input.quantization_info().offset);
- const QuantizationInfo weights_quantization_info(weights.quantization_info().scale, -weights.quantization_info().offset);
+ const QuantizationInfo input_quantization_info(input.quantization_info().uniform().scale, -input.quantization_info().uniform().offset);
+ const QuantizationInfo weights_quantization_info(weights.quantization_info().uniform().scale, -weights.quantization_info().uniform().offset);
// Validate gemmlowp function
ARM_COMPUTE_RETURN_ON_ERROR(NEGEMMLowpMatrixMultiplyCore::validate(&input.clone()->set_quantization_info(input_quantization_info),
@@ -90,8 +90,8 @@ void NEFullyConnectedLayer::configure_mm(const ITensor *input, const ITensor *we
const QuantizationInfo input_quantization_info = input->info()->quantization_info();
const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
- input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
- weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+ input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.uniform().scale, -input_quantization_info.uniform().offset));
+ weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));
// Configure gemmlowp function
_mm_gemmlowp.configure(input, weights, nullptr, output);
@@ -227,11 +227,15 @@ void NEFullyConnectedLayer::configure(const ITensor *input, const ITensor *weigh
// Configure output stage for asymmetric quantized types
if(_is_quantized)
{
- float multiplier = input->info()->quantization_info().scale * weights->info()->quantization_info().scale / output->info()->quantization_info().scale;
+ const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wq_info = weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo oq_info = output->info()->quantization_info().uniform();
+
+ float multiplier = (iq_info.scale * wq_info.scale) / oq_info.scale;
int output_multiplier;
int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
- _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, output->info()->quantization_info().offset);
+ _gemmlowp_output_stage.configure(&_gemmlowp_output, biases, output, output_multiplier, output_shift, oq_info.offset);
_gemmlowp_output.allocator()->allocate();
}
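
Why the offsets get negated above: with uniform asymmetric quantization, real = scale * (q - offset), and the low-precision GEMM core follows the gemmlowp convention of adding the offsets it is given, so stashing (scale, -offset) makes the core compute the (q - offset) terms. A scalar sketch of the product the matrix multiply evaluates elementwise (illustrative only, not library code):

    #include <cstdint>

    // real_a * real_b for two QASYMM8 values under real = scale * (q - offset)
    float dequantized_product(uint8_t qa, float sa, int32_t oa,
                              uint8_t qb, float sb, int32_t ob)
    {
        const int32_t ca = static_cast<int32_t>(qa) - oa; // widen before subtracting
        const int32_t cb = static_cast<int32_t>(qb) - ob; // to avoid unsigned wrap
        return sa * sb * static_cast<float>(ca * cb);
    }
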
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index a2c4e8a8b1..c011ddd18f 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -109,15 +109,15 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
{
// Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
// Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info = input->info()->quantization_info();
- const QuantizationInfo weights_quantization_info = weights->info()->quantization_info();
+ const UniformQuantizationInfo iqinfo = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->info()->quantization_info().uniform();
- input->info()->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
- weights->info()->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+ input->info()->set_quantization_info(QuantizationInfo(iqinfo.scale, -iqinfo.offset));
+ weights->info()->set_quantization_info(QuantizationInfo(wqinfo.scale, -wqinfo.offset));
- const QuantizationInfo output_quant_info = (output->info()->total_size() == 0) ? input_quantization_info : output->info()->quantization_info();
+ const UniformQuantizationInfo oqinfo = (output->info()->total_size() == 0) ? iqinfo : output->info()->quantization_info().uniform();
- float multiplier = input_quantization_info.scale * weights->info()->quantization_info().scale / output_quant_info.scale;
+ float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier;
int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
@@ -132,10 +132,10 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
};
if(_is_activationlayer_enabled && supported_acts.count(act_info.activation()) != 0)
{
- const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
- const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+ const int a_const_int = quantize_qasymm8(act_info.a(), oqinfo);
+ const int b_const_int = quantize_qasymm8(act_info.b(), oqinfo);
- min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
+ min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? oqinfo.offset : b_const_int;
max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
_is_activationlayer_enabled = false;
@@ -143,7 +143,7 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
GEMMLowpOutputStageInfo output_info;
output_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- output_info.gemmlowp_offset = output_quant_info.offset;
+ output_info.gemmlowp_offset = oqinfo.offset;
output_info.gemmlowp_multiplier = output_multiplier;
output_info.gemmlowp_shift = output_shift;
output_info.gemmlowp_min_bound = min_activation;
@@ -152,8 +152,8 @@ void NEGEMMConvolutionLayer::configure_mm(const ITensor *input, const ITensor *w
_mm_gemmlowp.configure(input, weights, biases, output, GEMMInfo(false, false, true, gemm_3d_depth, _skip_im2col, false, output_info));
// Revert QuantizationInfo as input and weights could be used in other convolution layers
- input->info()->set_quantization_info(input_quantization_info);
- weights->info()->set_quantization_info(weights_quantization_info);
+ input->info()->set_quantization_info(QuantizationInfo(iqinfo.scale, iqinfo.offset));
+ weights->info()->set_quantization_info(QuantizationInfo(wqinfo.scale, wqinfo.offset));
}
else
{
@@ -174,17 +174,17 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
{
// Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
// Extract and negate input and weights offset
- const QuantizationInfo input_quantization_info = input->quantization_info();
- const QuantizationInfo weights_quantization_info = weights->quantization_info();
+ const UniformQuantizationInfo iqinfo = input->quantization_info().uniform();
+ const UniformQuantizationInfo wqinfo = weights->quantization_info().uniform();
std::unique_ptr<ITensorInfo> input_qa = input->clone();
std::unique_ptr<ITensorInfo> weights_qa = weights->clone();
- input_qa->set_quantization_info(QuantizationInfo(input_quantization_info.scale, -input_quantization_info.offset));
- weights_qa->set_quantization_info(QuantizationInfo(weights_quantization_info.scale, -weights_quantization_info.offset));
+ input_qa->set_quantization_info(QuantizationInfo(iqinfo.scale, -iqinfo.offset));
+ weights_qa->set_quantization_info(QuantizationInfo(wqinfo.scale, -wqinfo.offset));
- const QuantizationInfo output_quant_info = (output->total_size() == 0) ? input_quantization_info : output->quantization_info();
+ const UniformQuantizationInfo oqinfo = (output->total_size() == 0) ? iqinfo : output->quantization_info().uniform();
- float multiplier = input_quantization_info.scale * weights->quantization_info().scale / output_quant_info.scale;
+ float multiplier = iqinfo.scale * wqinfo.scale / oqinfo.scale;
int output_multiplier;
int output_shift;
quantization::calculate_quantized_multiplier_less_than_one(multiplier, &output_multiplier, &output_shift);
@@ -199,16 +199,16 @@ Status NEGEMMConvolutionLayer::validate_mm(const ITensorInfo *input, const ITens
};
if(is_activation_enabled && supported_acts.count(act_info.activation()) != 0)
{
- const int a_const_int = output_quant_info.quantize(act_info.a(), RoundingPolicy::TO_NEAREST_UP);
- const int b_const_int = output_quant_info.quantize(act_info.b(), RoundingPolicy::TO_NEAREST_UP);
+ const int a_const_int = quantize_qasymm8(act_info.a(), oqinfo);
+ const int b_const_int = quantize_qasymm8(act_info.b(), oqinfo);
- min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? output_quant_info.offset : b_const_int;
+ min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? oqinfo.offset : b_const_int;
max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? 255 : a_const_int;
}
GEMMLowpOutputStageInfo output_info;
output_info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
- output_info.gemmlowp_offset = output_quant_info.offset;
+ output_info.gemmlowp_offset = oqinfo.offset;
output_info.gemmlowp_multiplier = output_multiplier;
output_info.gemmlowp_shift = output_shift;
output_info.gemmlowp_min_bound = min_activation;
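
quantize_qasymm8, used above to turn the activation's real-valued bounds a and b into clamp values in the output's quantized domain, replaces the old QuantizationInfo::quantize member call. A sketch under the standard QASYMM8 affine mapping q = round(x / scale) + offset, clamped to [0, 255]; the library's helper also takes a rounding policy, which is fixed to round-to-nearest here.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    struct UniformQuantizationInfo
    {
        float   scale;
        int32_t offset;
    };

    inline uint8_t quantize_qasymm8(float value, const UniformQuantizationInfo &qinfo)
    {
        const int q = static_cast<int>(std::lround(value / qinfo.scale)) + qinfo.offset;
        return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
    }

So for a bounded ReLU with bounds [b, a], the clamp becomes quantize_qasymm8(b, oqinfo) .. quantize_qasymm8(a, oqinfo), matching the min_activation/max_activation selection in the branches above.
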
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index 54f49a6707..d8773e37ab 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -61,8 +61,8 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_mtx_b_reshape_kernel = nullptr;
// Set internal variables
- _a_offset = a->info()->quantization_info().offset;
- _b_offset = b->info()->quantization_info().offset;
+ _a_offset = a->info()->quantization_info().uniform().offset;
+ _b_offset = b->info()->quantization_info().uniform().offset;
_run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
_reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
_is_prepared = false;
@@ -224,8 +224,8 @@ Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITenso
TensorInfo tmp_b_info{};
TensorInfo mm_result_s32_info{};
- int32_t a_offset = a->quantization_info().offset;
- int32_t b_offset = b->quantization_info().offset;
+ int32_t a_offset = a->quantization_info().uniform().offset;
+ int32_t b_offset = b->quantization_info().uniform().offset;
const bool reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
bool fuse_output_stage = gemm_info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
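
The two scalar offsets are all the core needs beyond the raw u8 GEMM, because expanding the quantized dot product shows the offset terms factor into per-row/per-column sums that can be folded in afterwards (which is what the offset-contribution kernels do). A scalar sketch of that identity for one output element (illustrative, not the kernel code):

    #include <cstdint>
    #include <vector>

    // sum_k (a_k - oa)(b_k - ob)
    //   = sum_k a_k*b_k  -  ob * sum_k a_k  -  oa * sum_k b_k  +  K * oa * ob
    int32_t quantized_dot(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                          int32_t a_offset, int32_t b_offset)
    {
        const int32_t k   = static_cast<int32_t>(a.size());
        int32_t raw = 0, sum_a = 0, sum_b = 0;
        for(int32_t i = 0; i < k; ++i)
        {
            raw   += static_cast<int32_t>(a[i]) * static_cast<int32_t>(b[i]);
            sum_a += a[i];
            sum_b += b[i];
        }
        // Offset contributions applied after the raw accumulation
        return raw - b_offset * sum_a - a_offset * sum_b + k * a_offset * b_offset;
    }
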
diff --git a/src/runtime/NEON/functions/NEPoolingLayer.cpp b/src/runtime/NEON/functions/NEPoolingLayer.cpp
index cbfd68485f..d92086d08d 100644
--- a/src/runtime/NEON/functions/NEPoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEPoolingLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,7 +55,7 @@ void NEPoolingLayer::configure(ITensor *input, ITensor *output, const PoolingLay
PixelValue zero_value(0.f);
if(is_data_type_quantized_asymmetric(input->info()->data_type()) && !pool_info.exclude_padding())
{
- zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().offset));
+ zero_value = PixelValue(static_cast<uint32_t>(input->info()->quantization_info().uniform().offset));
}
_border_handler.configure(input, _pooling_layer_kernel.border_size(), border_mode, zero_value);
break;
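
The border fill above is just real 0.0 expressed in the quantized domain: under real = scale * (q - offset), zero maps to q == offset, so when padding is counted in the average the border pixels must carry the offset to contribute nothing. A minimal illustration (assumed helper name):

    #include <cstdint>

    inline float dequantize_qasymm8(uint8_t q, float scale, int32_t offset)
    {
        return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
    }
    // dequantize_qasymm8(offset, scale, offset) == 0.f for any scale
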
diff --git a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
index 049bf66689..0499d9930f 100644
--- a/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/assembly/NEDepthwiseConvolutionAssemblyDispatch.cpp
@@ -72,9 +72,9 @@ std::unique_ptr<depthwise::IDepthwiseConvolution> create_convolver(const ITensor
// Create quantized convolver
if(data_type == DataType::QASYMM8)
{
- const QuantizationInfo &input_qinfo = input->info()->quantization_info();
- const QuantizationInfo &weights_qinfo = weights->info()->quantization_info();
- const QuantizationInfo &output_qinfo = output->info()->quantization_info();
+ const UniformQuantizationInfo input_qinfo = input->info()->quantization_info().uniform();
+ const UniformQuantizationInfo weights_qinfo = weights->info()->quantization_info().uniform();
+ const UniformQuantizationInfo output_qinfo = output->info()->quantization_info().uniform();
// Check that the quantization offsets are in the range [0, 255]
ARM_COMPUTE_ERROR_ON(input_qinfo.offset < 0 || input_qinfo.offset > 255);