From 2f60221e60b69852918581b4eb450a0f81455a46 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Thu, 30 Jan 2020 17:30:32 +0000
Subject: COMPMID-3046: Add CLRequantizationLayerKernel

Change-Id: I034f5aa023642f2323372495ddd14fc62b4c12e0
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2681
Comments-Addressed: Arm Jenkins
Reviewed-by: Giorgio Arena
Tested-by: Arm Jenkins
---
 tests/validation/CL/QuantizationLayer.cpp          | 73 ++++++++++++++++++++--
 .../validation/fixtures/QuantizationLayerFixture.h | 29 ++++++---
 tests/validation/reference/QuantizationLayer.cpp   | 35 +++++++++++
 3 files changed, 122 insertions(+), 15 deletions(-)

(limited to 'tests')

diff --git a/tests/validation/CL/QuantizationLayer.cpp b/tests/validation/CL/QuantizationLayer.cpp
index e9544fdb8a..e3f47f98a8 100644
--- a/tests/validation/CL/QuantizationLayer.cpp
+++ b/tests/validation/CL/QuantizationLayer.cpp
@@ -42,9 +42,12 @@ namespace validation
 {
 namespace
 {
-constexpr AbsoluteTolerance<float> tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
-const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
+constexpr AbsoluteTolerance<float>    tolerance_f32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+constexpr AbsoluteTolerance<uint8_t>  tolerance_u8(1);     /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8 data types */
+constexpr AbsoluteTolerance<int8_t>   tolerance_s8(1);     /**< Tolerance value for comparing reference's output against implementation's output for QASYMM8_SIGNED data types */
+constexpr AbsoluteTolerance<uint16_t> tolerance_u16(1);    /**< Tolerance value for comparing reference's output against implementation's output for QASYMM16 data types */
+const auto QuantizationSmallShapes = concat(datasets::Small3DShapes(), datasets::Small4DShapes());
+const auto QuantizationLargeShapes = concat(datasets::Large3DShapes(), datasets::Large4DShapes());
 } // namespace

 TEST_SUITE(CL)
@@ -53,7 +56,7 @@ TEST_SUITE(QuantizationLayer)
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
-    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),  // Wrong input data type
+    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8),  // Wrong output data type
                                             TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),      // Wrong output data type
                                             TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::F32),       // Mismatching shapes
                                             TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),      // Valid
@@ -125,7 +128,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16Fixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(QuantizationLargeShapes,
                        framework::dataset::make("DataTypeIn", DataType::F32)),
@@ -141,7 +144,7 @@ FIXTURE_DATA_TEST_CASE(RunLargeQASYMM16, CLQuantizationLayerQASYMM16Fixture,
 TEST_SUITE_END() // FP16
 TEST_SUITE_END() // Float

+TEST_SUITE(Quantized)
+template <typename T>
+using CLQuantizationLayerQASYMM8GenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM8_SIGNEDGenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, int8_t>;
+template <typename T>
+using CLQuantizationLayerQASYMM16GenFixture = QuantizationValidationGenericFixture<CLTensor, CLAccessor, CLQuantizationLayer, T, uint16_t>;
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(0.5f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_u8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_s8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, CLQuantizationLayerQASYMM16GenFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(4.0f, 23) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_u16);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QASYMM8_SIGNED)
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, CLQuantizationLayerQASYMM8_SIGNEDGenFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+                       framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })),
+                       framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 5) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_s8);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, CLQuantizationLayerQASYMM8GenFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(QuantizationSmallShapes,
+                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                       framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.0f, 10) })),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.0f, 30) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_u8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+
 TEST_SUITE_END() // QuantizationLayer
 TEST_SUITE_END() // CL
 } // namespace validation
diff --git a/tests/validation/fixtures/QuantizationLayerFixture.h b/tests/validation/fixtures/QuantizationLayerFixture.h
index 4ffc659027..085abefffc 100644
--- a/tests/validation/fixtures/QuantizationLayerFixture.h
+++ b/tests/validation/fixtures/QuantizationLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
@@ -43,14 +43,14 @@ namespace test
 namespace validation
 {
 template <typename TensorType, typename AccessorType, typename FunctionType, typename Tin, typename Tout>
-class QuantizationValidationFixture : public framework::Fixture
+class QuantizationValidationGenericFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
-        _target    = compute_target(shape, data_type_in, data_type_out, qinfo);
-        _reference = compute_reference(shape, data_type_in, data_type_out, qinfo);
+        _target    = compute_target(shape, data_type_in, data_type_out, qinfo, qinfo_in);
+        _reference = compute_reference(shape, data_type_in, data_type_out, qinfo, qinfo_in);
     }

 protected:
@@ -60,10 +60,10 @@ protected:
         library->fill_tensor_uniform(tensor, 0);
     }

-    TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    TensorType compute_target(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type_in);
+        TensorType src = create_tensor<TensorType>(shape, data_type_in, 1, qinfo_in);
         TensorType dst = create_tensor<TensorType>(shape, data_type_out, 1, qinfo);

         // Create and configure function
@@ -89,10 +89,10 @@ protected:
         return dst;
     }

-    SimpleTensor<Tout> compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    SimpleTensor<Tout> compute_reference(const TensorShape &shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo, QuantizationInfo qinfo_in)
     {
         // Create reference
-        SimpleTensor<Tin> src{ shape, data_type_in };
+        SimpleTensor<Tin> src{ shape, data_type_in, 1, qinfo_in };

         // Fill reference
         fill(src);
@@ -104,6 +104,17 @@ protected:
     SimpleTensor<Tout> _reference{};
 };

+template <typename TensorType, typename AccessorType, typename FunctionType, typename Tin, typename Tout>
+class QuantizationValidationFixture : public QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type_in, DataType data_type_out, QuantizationInfo qinfo)
+    {
+        QuantizationValidationGenericFixture<TensorType, AccessorType, FunctionType, Tin, Tout>::setup(shape, data_type_in, data_type_out, qinfo, QuantizationInfo());
+    }
+};
+
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp
index 8ba3744afc..cfc508529e 100644
--- a/tests/validation/reference/QuantizationLayer.cpp
+++ b/tests/validation/reference/QuantizationLayer.cpp
@@ -77,6 +77,41 @@ SimpleTensor<Tout> quantization_layer(const SimpleTensor<Tin> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
     return dst;
 }

+template <>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, uint8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<int8_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, int8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<uint8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<uint8_t>(src);
+    return quantization_layer<float, uint16_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<int8_t> quantization_layer(const SimpleTensor<int8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<int8_t>(src);
+    return quantization_layer<float, int8_t>(src_tmp, output_data_type, quantization_info);
+}
+
+template <>
+SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<int8_t> &src, DataType output_data_type, const QuantizationInfo &quantization_info)
+{
+    SimpleTensor<float> src_tmp = convert_from_asymmetric<int8_t>(src);
+    return quantization_layer<float, uint8_t>(src_tmp, output_data_type, quantization_info);
+}
+
 template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
 template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
--
cgit v1.2.1
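
Note (not part of the patch): the new reference specializations above all follow the same requantization pattern, namely dequantize with the source tensor's QuantizationInfo and then quantize again with the requested output QuantizationInfo. The sketch below is a minimal standalone C++ illustration of that arithmetic using the scale/offset pair from the RunSmallQASYMM8 case (input 2.0f/15, output 0.5f/10); the helper name, rounding mode and saturation handling are simplifying assumptions for demonstration, not Compute Library APIs.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Hypothetical helper (not a Compute Library API): requantize one QASYMM8 value by
// dequantizing with the input quantization info and re-quantizing with the output one.
static uint8_t requantize_qasymm8(uint8_t q_in, float scale_in, int offset_in, float scale_out, int offset_out)
{
    const float real = scale_in * (static_cast<int>(q_in) - offset_in);              // dequantize
    const int   q    = static_cast<int>(std::lround(real / scale_out)) + offset_out; // quantize
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));                      // saturate to [0, 255]
}

int main()
{
    // Scale/offset pairs mirror the RunSmallQASYMM8 case: input (2.0f, 15), output (0.5f, 10).
    for (int q : { 0, 15, 100, 255 })
    {
        std::cout << q << " -> " << static_cast<int>(requantize_qasymm8(static_cast<uint8_t>(q), 2.0f, 15, 0.5f, 10)) << '\n';
    }
    return 0;
}

The QuantizationValidationGenericFixture exercises the same split between input and output QuantizationInfo through CLQuantizationLayer, comparing the result against the reference with the per-type tolerances tolerance_u8, tolerance_s8 and tolerance_u16.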