From 2f60221e60b69852918581b4eb450a0f81455a46 Mon Sep 17 00:00:00 2001 From: Manuel Bottini Date: Thu, 30 Jan 2020 17:30:32 +0000 Subject: COMPMID-3046: Add CLRequantizationLayerKernel Change-Id: I034f5aa023642f2323372495ddd14fc62b4c12e0 Signed-off-by: Manuel Bottini Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2681 Comments-Addressed: Arm Jenkins Reviewed-by: Giorgio Arena Tested-by: Arm Jenkins --- .../core/CL/kernels/CLQuantizationLayerKernel.h | 4 +- .../runtime/CL/functions/CLQuantizationLayer.h | 4 +- src/core/CL/cl_kernels/quantization_layer.cl | 31 ++++++--- src/core/CL/kernels/CLQuantizationLayerKernel.cpp | 52 +++++++++++++-- tests/validation/CL/QuantizationLayer.cpp | 73 ++++++++++++++++++++-- .../validation/fixtures/QuantizationLayerFixture.h | 29 ++++++--- tests/validation/reference/QuantizationLayer.cpp | 35 +++++++++++ 7 files changed, 196 insertions(+), 32 deletions(-) diff --git a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h index 4e487cd57b..07c93d3306 100644 --- a/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h +++ b/arm_compute/core/CL/kernels/CLQuantizationLayerKernel.h @@ -51,7 +51,7 @@ public: ~CLQuantizationLayerKernel() = default; /** Set the input, output. * - * @param[in] input Source tensor. Data types supported: F32/F16. + * @param[in] input Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16. * * @note Output auto initialization is not supported by this kernel @@ -59,7 +59,7 @@ public: void configure(const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayerKernel * - * @param[in] input Input tensor info. Data types supported: F32/F16. + * @param[in] input Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F32/F16. * @param[in] output Destination tensor info with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16. * * @return a status diff --git a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h index ad039f570e..fbdef53aeb 100644 --- a/arm_compute/runtime/CL/functions/CLQuantizationLayer.h +++ b/arm_compute/runtime/CL/functions/CLQuantizationLayer.h @@ -42,7 +42,7 @@ class CLQuantizationLayer : public ICLSimpleFunction public: /** Set the input and output tensors. * - * @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: F16/32. + * @param[in] input Source tensor. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/32. * @param[out] output Destination tensor with the same dimensions of input. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16. * * @note Output auto initialization is not supported by this function @@ -50,7 +50,7 @@ public: void configure(const ICLTensor *input, ICLTensor *output); /** Static function to check if given info will lead to a valid configuration of @ref CLQuantizationLayer * - * @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: F16/32. + * @param[in] input Input tensor info. The dimensions over the third will be interpreted as batches. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/32. 
 * @param[in] output Output tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QASYMM16.
 *
 * @return a status
diff --git a/src/core/CL/cl_kernels/quantization_layer.cl b/src/core/CL/cl_kernels/quantization_layer.cl
index 41d9957150..cfb2bb65f7 100644
--- a/src/core/CL/cl_kernels/quantization_layer.cl
+++ b/src/core/CL/cl_kernels/quantization_layer.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -29,7 +29,7 @@
 #if defined(VEC_SIZE) && defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) && defined(SCALE) && defined(OFFSET) && defined(MIN_QUANT_VAL) && defined(MAX_QUANT_VAL)
-/** This performs the quantization of floating point inputs to 8-bit unsigned integers.
+/** This performs the quantization of floating point inputs or 8-bit quantized integers to 8-bit integers.
 *
 * @note Input data type should be given as a preprocessor argument using -DDATA_TYPE_IN=type. e.g. -DDATA_TYPE_IN=short
 * @note Output data type should be given as a preprocessor argument using -DDATA_TYPE_OUT=type. e.g. -DDATA_TYPE_OUT=short
 *
@@ -38,8 +38,9 @@
 * @note Quantization offset should be given as a preprocessor argument using -DOFFSET=offset. e.g. -DOFFSET=125
 * @note Minimum value for quantized type should be given as a preprocessor argument using -DMIN_QUANT_VAL=value. e.g. -DMIN_QUANT_VAL=0
 * @note Maximum value for quantized type should be given as a preprocessor argument using -DMAX_QUANT_VAL=value. e.g. -DMAX_QUANT_VAL=255
+ * @note If the input data type is a floating point type (F16 or F32), the preprocessor argument -DIS_FLOAT should be given
 *
- * @param[in] input_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/F16/F32
 * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
 * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
@@ -47,7 +48,7 @@
 * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
 * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: U8
+ * @param[out] output_ptr Pointer to the destination tensor. 
Supported data types: QASYMM8/QASYMM8_SIGNED * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) @@ -72,16 +73,30 @@ __kernel void quantization_layer( output.ptr -= max(xi - (int)LAST_ACCESSED_X, 0) * output_stride_x; // Load data +#if defined(IS_FLOAT) + // Load data VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) - val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr); + val_float = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr); // Create scale and offset vectors const VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) vscale = SCALE; - const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET; + const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET; +#else // defined(IS_FLOAT) + // Load data + VEC_DATA_TYPE(DATA_TYPE_IN, VEC_SIZE) + val = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN *)input.ptr); + + const VEC_DATA_TYPE(float, VEC_SIZE) + val_float = CONVERT(val, VEC_DATA_TYPE(float, VEC_SIZE)); + + // Create scale and offset vectors + const VEC_DATA_TYPE(float, VEC_SIZE) vscale = SCALE; + const VEC_DATA_TYPE(int, VEC_SIZE) voffset = OFFSET; +#endif // defined(IS_FLOAT) // Quantize VEC_DATA_TYPE(int, VEC_SIZE) - res = CLAMP(CONVERT_RTE_VEC(val / vscale, int, VEC_SIZE) + voffset, MIN_QUANT_VAL, MAX_QUANT_VAL); + res = CLAMP(CONVERT_RTE_VEC(val_float / vscale, int, VEC_SIZE) + voffset, MIN_QUANT_VAL, MAX_QUANT_VAL); // Store result VSTORE(VEC_SIZE) @@ -90,4 +105,4 @@ __kernel void quantization_layer( *((__global DATA_TYPE_OUT *)(output.ptr)) = (DATA_TYPE_OUT)CLAMP(CONVERT_RTE(((float) * (__global DATA_TYPE_IN *)input.ptr) / ((float)SCALE), int) + (int)OFFSET, MIN_QUANT_VAL, MAX_QUANT_VAL); #endif // defined(VEC_SIZE) && defined(LAST_ACCESSED_X) } -#endif //defined(VEC_SIZE) && defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) && defined(SCALE) && defined(OFFSET) && defined(MIN_QUANT_VAL) && defined(MAX_QUANT_VAL) +#endif // defined(VEC_SIZE) && defined(DATA_TYPE_IN) && defined(DATA_TYPE_OUT) && defined(SCALE) && defined(OFFSET) && defined(MIN_QUANT_VAL) && defined(MAX_QUANT_VAL) diff --git a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp index 3d7aff0712..ab3b5d271d 100644 --- a/src/core/CL/kernels/CLQuantizationLayerKernel.cpp +++ b/src/core/CL/kernels/CLQuantizationLayerKernel.cpp @@ -41,7 +41,7 @@ namespace Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output) { ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output); - ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F32, DataType::F16); ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input); // Output must always be initialized @@ -62,8 +62,7 @@ std::pair validate_and_configure_window(ITensorInfo *input, ITen const bool multi_access_x = (input_width_x / vec_size_x > 0); if(multi_access_x) { - win.set(Window::DimX, - Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x)); + win.set(Window::DimX, Window::Dimension(win.x().start(), ceil_to_multiple(win.x().end(), vec_size_x), vec_size_x)); } Coordinates coord; @@ -99,10 +98,53 @@ void CLQuantizationLayerKernel::configure(const ICLTensor *input, ICLTensor *out const UniformQuantizationInfo qinfo = 
output->info()->quantization_info().uniform();
     const DataType output_data_type = output->info()->data_type();
 
+    float   scale_to_apply  = qinfo.scale;
+    int32_t offset_to_apply = qinfo.offset;
+    if(is_data_type_quantized_asymmetric(_input->info()->data_type()))
+    {
+        /*
+         * In case of requantization of a quantized input tensor to an output tensor with another quantization,
+         * instead of applying a dequantization followed by a quantization, we just compute the new scale and
+         * offset to apply.
+         *
+         * Assuming:
+         *   - q_i as input quantized value
+         *   - q_o as output quantized value
+         *   - z_i as input quantization offset value
+         *   - z_o as output quantization offset value
+         *   - s_i as input quantization scale value
+         *   - s_o as output quantization scale value
+         *   - z_n as new quantization offset value
+         *   - s_n as new quantization scale value
+         *
+         *   q_o = ( q_i - z_i ) * s_i / s_o + z_o
+         *
+         * We can rewrite the formula as:
+         *
+         *   q_o = ( q_i * s_i / s_o ) - z_i * s_i / s_o + z_o
+         *
+         *   q_o = q_i / s_n + z_n
+         *
+         * Where:
+         *
+         *   s_n = s_o / s_i
+         *
+         *   z_n = - z_i * s_i / s_o + z_o
+         *
+         */
+        const UniformQuantizationInfo qinfo_in = _input->info()->quantization_info().uniform();
+        scale_to_apply /= qinfo_in.scale;
+        // In order to minimize flooring we convert the offset to float,
+        // compute the new offset in the float domain,
+        // and finally convert it back to int32_t
+        offset_to_apply -= static_cast<int32_t>(static_cast<float>(qinfo_in.offset) * qinfo_in.scale / qinfo.scale);
+    }
+
     // Create kernel
     CLBuildOptions build_opts;
-    build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
-    build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(qinfo.offset));
+    build_opts.add_option_if(is_data_type_float(_input->info()->data_type()), "-DIS_FLOAT");
+    build_opts.add_option("-DSCALE=" + float_to_string_with_full_precision(scale_to_apply));
+    build_opts.add_option("-DOFFSET=" + support::cpp11::to_string(offset_to_apply));
     build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
     build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
     build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output_data_type));
diff --git a/tests/validation/CL/QuantizationLayer.cpp b/tests/validation/CL/QuantizationLayer.cpp
index e9544fdb8a..e3f47f98a8 100644
--- a/tests/validation/CL/QuantizationLayer.cpp
+++ b/tests/validation/CL/QuantizationLayer.cpp
@@ -42,9 +42,12 @@ namespace validation
 {
 namespace
 {
-constexpr AbsoluteTolerance<float> tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
-const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
+constexpr AbsoluteTolerance<float>    tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+constexpr AbsoluteTolerance<uint8_t>  tolerance_u8(1);     /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8 data types */
+constexpr AbsoluteTolerance<int8_t>   tolerance_s8(1);     /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8_SIGNED data types */
+constexpr AbsoluteTolerance<uint16_t> tolerance_u16(1);    /**< Tolerance value for comparing reference's output against implementation's output for QASYMM16 data types */
+const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes()); +const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes()); } // namespace TEST_SUITE(CL) @@ -53,7 +56,7 @@ TEST_SUITE(QuantizationLayer) // *INDENT-OFF* // clang-format off DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( - framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong input data type + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Wrong output data type TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Wrong output data type TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32), // Mismatching shapes TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32), // Valid @@ -125,7 +128,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16Fixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes, framework::dataset::make("DataTypeIn", DataType::F32)), @@ -141,7 +144,7 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, CLQuantizationLayerQASYMM16Fixture, TEST_SUITE_END() // FP16 TEST_SUITE_END() // Float +TEST_SUITE(Quantized) +template +using CLQuantizationLayerQASYMM8GenFixture = QuantizationValidationGenericFixture; +template +using CLQuantizationLayerQASYMM8_SIGNEDGenFixture = QuantizationValidationGenericFixture; +template +using CLQuantizationLayerQASYMM16GenFixture = QuantizationValidationGenericFixture; +TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes, + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })), + framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(0.5f, 10) })), + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_u8); +} +FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes, + framework::dataset::make("DataTypeIn", DataType::QASYMM8)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })), + framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })), + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_s8); +} +FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16GenFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes, + framework::dataset::make("DataTypeIn", DataType::QASYMM8)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })), + framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })), + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(4.0f, 23) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_u16); +} +TEST_SUITE_END() // QASYMM8 +TEST_SUITE(QASYMM8_SIGNED) +FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes, + 
framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })), + framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })), + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 5) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_s8); +} +FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes, + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.0f, 10) })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.0f, 30) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_u8); +} +TEST_SUITE_END() // QASYMM8_SIGNED +TEST_SUITE_END() // Quantized + TEST_SUITE_END() // QuantizationLayer TEST_SUITE_END() // CL } // namespace validation diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h index 4ffc659027..085abefffc 100644 --- a/tests/validation/fixtures/QuantizationLayerFixture.h +++ b/tests/validation/fixtures/QuantizationLayerFixture.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -43,14 +43,14 @@ namespace test namespace validation { template -class QuantizationValidationFixture : public framework::Fixture +class QuantizationValidationGenericFixture : public framework::Fixture { public: template - void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo) + void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in) { - _target = compute_target(shape, data_type_in, data_type_out, qinfo); - _reference = compute_reference(shape, data_type_in, data_type_out, qinfo); + _target = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in); + _reference = compute_reference(shape, data_type_in, data_type_out, qinfo, qinfo_in); } protected: @@ -60,10 +60,10 @@ protected: library->fill_tensor_uniform(tensor, 0); } - TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo) + TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in) { // Create tensors - TensorType src = create_tensor(shape, data_type_in); + TensorType src = create_tensor(shape, data_type_in, 1, qinfo_in); TensorType dst = create_tensor(shape, data_type_out, 1, qinfo); // Create and configure function @@ -89,10 +89,10 @@ protected: return dst; } - SimpleTensor compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo) + SimpleTensor compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in) { // Create reference - SimpleTensor src{ shape, data_type_in }; + SimpleTensor src{ shape, data_type_in, 1, qinfo_in }; // Fill reference fill(src); @@ -104,6 +104,17 @@ protected: SimpleTensor _reference{}; }; +template +class QuantizationValidationFixture : public QuantizationValidationGenericFixture +{ +public: + 
template + void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo) + { + QuantizationValidationGenericFixture::setup(shape, data_type_in, data_type_out, qinfo, QuantizationInfo()); + } +}; + } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp index 8ba3744afc..cfc508529e 100644 --- a/tests/validation/reference/QuantizationLayer.cpp +++ b/tests/validation/reference/QuantizationLayer.cpp @@ -77,6 +77,41 @@ SimpleTensor quantization_layer(const SimpleTensor &src, DataType out return dst; } +template <> +SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info) +{ + SimpleTensor src_tmp = convert_from_asymmetric(src); + return quantization_layer(src_tmp, output_data_type, quantization_info); +} + +template <> +SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info) +{ + SimpleTensor src_tmp = convert_from_asymmetric(src); + return quantization_layer(src_tmp, output_data_type, quantization_info); +} + +template <> +SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info) +{ + SimpleTensor src_tmp = convert_from_asymmetric(src); + return quantization_layer(src_tmp, output_data_type, quantization_info); +} + +template <> +SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info) +{ + SimpleTensor src_tmp = convert_from_asymmetric(src); + return quantization_layer(src_tmp, output_data_type, quantization_info); +} + +template <> +SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info) +{ + SimpleTensor src_tmp = convert_from_asymmetric(src); + return quantization_layer(src_tmp, output_data_type, quantization_info); +} + template SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info); template SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info); template SimpleTensor quantization_layer(const SimpleTensor &src, DataType output_data_type, const QuantizationInfo &quantization_info); -- cgit v1.2.1
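
For reference, the scale/offset folding introduced in CLQuantizationLayerKernel::configure() can be exercised in isolation. The sketch below applies the same arithmetic to a single value: s_n = s_o / s_i, z_n = z_o - z_i * s_i / s_o, then q_o = round(q_i / s_n) + z_n, clamped to the output range. It is a minimal illustration and not Compute Library API: the helper name requantize, the explicit q_min/q_max parameters, and the use of std::lround (the CL kernel rounds to nearest even via CONVERT_RTE) are assumptions made for the example.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Requantize one value from (scale s_i, offset z_i) to (scale s_o, offset z_o),
    // folding the dequantize + quantize pair into a single scale and offset,
    // as derived in the comment inside CLQuantizationLayerKernel::configure().
    int32_t requantize(int32_t q_i, float s_i, int32_t z_i, float s_o, int32_t z_o,
                       int32_t q_min, int32_t q_max)
    {
        const float s_n = s_o / s_i; // new scale
        // New offset, computed in the float domain to limit rounding error,
        // mirroring the float conversion done in the kernel configuration.
        const int32_t z_n = z_o - static_cast<int32_t>(static_cast<float>(z_i) * s_i / s_o);
        const int32_t q_o = static_cast<int32_t>(std::lround(static_cast<float>(q_i) / s_n)) + z_n;
        return std::min(std::max(q_o, q_min), q_max); // clamp to the output type's range
    }

With the quantization infos used by the RunSmallQASYMM8 test above (input QuantizationInfo(2.0f, 15), output QuantizationInfo(0.5f, 10)), s_n = 0.25 and z_n = -50, so requantize(15, 2.0f, 15, 0.5f, 10, 0, 255) returns 10: the input value 15 represents 0.0f and maps onto the output zero point.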