diff options
author | Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com> | 2024-01-30 18:25:51 +0000 |
---|---|---|
committer | Gunes Bayir <gunes.bayir@arm.com> | 2024-02-20 11:31:57 +0000 |
commit | 0a48c4c83b598991b4d4235f870c24d9e6634b20 (patch) | |
tree | 4d0117496c527fd952f435711e5c385023d7068e /tests/validation/NEON/QuantizationLayer.cpp | |
parent | 946905847bf1d82b183e718fddfc7664702e5a84 (diff) | |
download | ComputeLibrary-0a48c4c83b598991b4d4235f870c24d9e6634b20.tar.gz |
Requantization cases for offset changes only
Resolves: [COMPMID-6681]
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Change-Id: I325b9d478dd1d04a45533bb7708cf76e98ee0cee
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11058
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation/NEON/QuantizationLayer.cpp')
-rw-r--r-- | tests/validation/NEON/QuantizationLayer.cpp | 30 |
1 file changed, 25 insertions, 5 deletions
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp index aeee54c835..bab7490762 100644 --- a/tests/validation/NEON/QuantizationLayer.cpp +++ b/tests/validation/NEON/QuantizationLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 Arm Limited. + * Copyright (c) 2017-2021, 2024 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -34,6 +34,7 @@ #include "tests/validation/Validation.h" #include "tests/validation/fixtures/QuantizationLayerFixture.h" + namespace arm_compute { namespace test @@ -182,7 +183,16 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8GenFixture<uin framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })), framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(0.5f, 10) })), - framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15) }))) + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 15), QuantizationInfo(0.5f, 25) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_u8); +} +FIXTURE_DATA_TEST_CASE(ConvertUint8toInt8, NEQuantizationLayerQASYMM8GenFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(QuantizationSmallShapes, + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })), + framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(2.0f, -1) })), + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, 127) }))) { // Validate output validate(Accessor(_target), _reference, tolerance_u8); @@ -191,7 +201,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, NEQuantizationLayerQASYMM8_SIGNED framework::dataset::make("DataTypeIn", DataType::QASYMM8)), framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })), framework::dataset::make("QuantizationInfoOutput", { 
QuantizationInfo(1.0f, 10), QuantizationInfo(2.0f, -25) })), - framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15) }))) + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 15), QuantizationInfo(1.0f, 127) }))) { // Validate output validate(Accessor(_target), _reference, tolerance_s8); @@ -211,7 +221,7 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8_SIGNED, NEQuantizationLayerQASYMM8_SIGNED framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })), framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 10) })), - framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, -5) }))) + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(2.0f, -5), QuantizationInfo(1.0f, 43) }))) { // Validate output validate(Accessor(_target), _reference, tolerance_s8); @@ -220,11 +230,21 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8GenFixture<int framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })), framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(2.0f, 10), QuantizationInfo(2.0f, -25) })), - framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 30) }))) + framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, 30), QuantizationInfo(2.0f, -128) }))) { // Validate output validate(Accessor(_target), _reference, tolerance_u8); } +FIXTURE_DATA_TEST_CASE(ConvertInt8toUint8, NEQuantizationLayerQASYMM8_SIGNEDGenFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(QuantizationSmallShapes, + framework::dataset::make("DataTypeIn", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataTypeOut", { DataType::QASYMM8 })), + framework::dataset::make("QuantizationInfoOutput", { QuantizationInfo(1.0f, 0) })), + 
framework::dataset::make("QuantizationInfoInput", { QuantizationInfo(1.0f, -128) }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_s8); +} + TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE_END() // Quantized |