aboutsummaryrefslogtreecommitdiff
path: root/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/CL/kernels/CLQuantizationLayerKernel.cpp')
-rw-r--r--src/core/CL/kernels/CLQuantizationLayerKernel.cpp52
1 files changed, 47 insertions, 5 deletions
diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
index 3d7aff0712..ab3b5d271d 100644
--- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp
@@ -41,7 +41,7 @@ namespace
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F32, DataType::F16);
ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
// Output must always be initialized
@@ -62,8 +62,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
const bool multi_access_x = (input_width_x / vec_size_x > 0);
if(multi_access_x)
{
- win.set(Window::DimX,
- Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
+ win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x));
}
Coordinates coord;
@@ -99,10 +98,53 @@ void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *out
const UniformQuantizationInfo qinfo = output->info()->quantization_info().uniform();
const DataType output_data_type = output->info()->data_type();
+ float scale_to_apply = qinfo.scale;
+ int32_t offset_to_apply = qinfo.offset;
+ if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
+ {
+ /*
+ * In case of requantization of a quantized input tensor to an output tensor with another quantization
+ * instead of applying a dequantization and then a quantization function, we just compute the new scale and
+ * offset to apply.
+ *
+ * Assuming:
+ * - q_i as input quantized value
+ * - q_o as output quantized value
+ * - z_i as input quantization offset value
+ * - z_o as output quantization offset value
+ * - s_i as input quantization scale value
+ * - s_o as output quantization scale value
+ * - z_n as new quantization offset value
+ * - s_n as new quantization scale value
+ *
+ * q_o = ( q_i - z_i ) * s_i / s_o + z_o
+ *
+ * We can rewrite the formula as:
+ *
+ * q_o = ( q_i * s_i / s_o ) - z_i * s_i / s_o + z_o
+ *
+ * q_o = q_i / s_n + z_n
+ *
+ * Where:
+ *
+ * s_n = s_o / s_i
+ *
+ * z_n = - z_i * s_i / s_o + z_o
+ *
+ */
+ const UniformQuantizationInfo qinfo_in = _input->info()->quantization_info().uniform();
+ scale_to_apply /= qinfo_in.scale;
+ // In order to minimize flooring we convert the offset to a float,
+ // then compute the new offset in the float domain,
+ // finally we convert it back to int32_t
+ offset_to_apply -= static_cast<int32_t>(static_cast<float>(qinfo_in.offset) * qinfo_in.scale / qinfo.scale);
+ }
+
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
- build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qinfo.offset));
+ build_opts.add_option_if(is_data_type_float(_input->info()->data_type()), "-DIS_FLOAT");
+ build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(scale_to_apply));
+ build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(offset_to_apply));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output_data_type));