| author | giuros01 <giuseppe.rossini@arm.com> | 2019-06-20 10:30:17 +0100 |
|---|---|---|
| committer | Manuel Bottini <manuel.bottini@arm.com> | 2019-07-01 10:20:51 +0000 |
| commit | c9573f35c4267aa55648df4a134ebec82c5af93b (patch) | |
| tree | aa17eabf8c1ecec995d22aa8a3479b9f15becaf7 /tests/validation/NEON/ActivationLayer.cpp | |
| parent | 5a1320b923ec4db1cde00f9fc1be590022178b7f (diff) | |
| download | ComputeLibrary-c9573f35c4267aa55648df4a134ebec82c5af93b.tar.gz | |
COMPMID-2407: Add (logistic and tanh) activation support for QSYMM16 for NEON
Change-Id: Ib89c9cfe12975e51d1710af736c73ce79e667363
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1412
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
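
For readers unfamiliar with what the patch enables: after this change, NEActivationLayer accepts QSYMM16 tensors for the logistic and tanh activation functions. Below is a minimal usage sketch; the tensor shape, the scale, and the buffer handling are illustrative assumptions, not code from this patch.

```cpp
// Illustrative sketch (not from the patch): running a logistic activation
// on a QSYMM16 tensor with Compute Library's NEON backend.
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Hypothetical 1D tensor; QSYMM16 is a symmetric scheme (scale only, no offset).
    Tensor src, dst;
    const TensorShape      shape(16U);
    const QuantizationInfo qinfo(1.f / 32768.f); // same scale as the new tests
    src.allocator()->init(TensorInfo(shape, 1, DataType::QSYMM16, qinfo));
    dst.allocator()->init(TensorInfo(shape, 1, DataType::QSYMM16, qinfo));

    // Logistic is one of the two functions this commit enables for QSYMM16.
    NEActivationLayer act;
    act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC));

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with quantized int16 data here ...
    act.run();
    return 0;
}
```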
Diffstat (limited to 'tests/validation/NEON/ActivationLayer.cpp')
-rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 29 |
1 file changed, 28 insertions(+), 1 deletion(-)
```diff
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index a5030b94b7..1174a055c7 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -104,6 +104,8 @@ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
 constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
 #endif // defined(__aarch64__)
 
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
+
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
 {
@@ -233,7 +235,6 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ
                                                                                                   ActivationLayerInfo::ActivationFunction::LOGISTIC,
                                                                                                   ActivationLayerInfo::ActivationFunction::TANH
                                                                                                 });
-
 const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), QuantizedActivationFunctionsDataset),
                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
 
@@ -256,6 +257,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, fra
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
 TEST_SUITE_END() // QASYMM8
+
+/** Input data sets. */
+const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LOGISTIC,
+                                                                                                       ActivationLayerInfo::ActivationFunction::TANH
+                                                                                                     });
+const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), Int16QuantizedActivationFunctionsDataset),
+                                                     framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
+                                                                                                                        framework::dataset::make("DataType",
+                                                                                                                                                 DataType::QSYMM16)),
+                                                                                                                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), Int16QuantizedActivationDataset),
+                                                                                                                      framework::dataset::make("DataType",
+                                                                                                                                               DataType::QSYMM16)),
+                                                                                                                      framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
 TEST_SUITE_END() // Quantized
 TEST_SUITE_END() // ActivationLayer
```
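
A note on the constants the new tests pin down: QuantizationInfo(1.f / 32768.f, 0.f) defines a symmetric 16-bit scheme whose representable range is roughly [-1, 1), a natural fit for logistic ([0, 1]) and tanh ([-1, 1]) outputs, and tolerance_qsymm16(1) lets the kernel result differ from the reference by one quantized step. The sketch below illustrates that quantize/dequantize arithmetic; it is a standalone illustration, not library code.

```cpp
// Standalone illustration of QSYMM16 arithmetic as used by the new tests:
// symmetric 16-bit quantization with scale = 1/32768 and no offset.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int16_t quantize_qsymm16(float x, float scale)
{
    // Round to nearest, then clamp to the representable int16 range.
    const int q = static_cast<int>(std::lround(x / scale));
    return static_cast<int16_t>(std::min(std::max(q, -32768), 32767));
}

float dequantize_qsymm16(int16_t q, float scale)
{
    return static_cast<float>(q) * scale;
}

int main()
{
    const float   scale    = 1.f / 32768.f; // same as the tests' QuantizationInfo
    const float   tanh_out = 0.7615942f;    // tanh(1.0)
    const int16_t q        = quantize_qsymm16(tanh_out, scale);
    // tolerance_qsymm16(1) means a kernel result of q-1 or q+1 would still pass.
    std::printf("q = %d, dequantized = %f\n", q, dequantize_qsymm16(q, scale));
    return 0;
}
```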