| field | value | date |
|---|---|---|
| author | Sang-Hoon Park <sang-hoon.park@arm.com> | 2020-11-25 11:46:03 +0000 |
| committer | Sang-Hoon Park <sang-hoon.park@arm.com> | 2020-12-02 10:18:46 +0000 |
| commit | add8e815ea94c8f8e6b1c9faf18527695f1332ec (patch) | |
| tree | 5941064344b426d12bc76b2fba3d0c631e796088 /tests | |
| parent | 4ffc42afafc8e6eee9917ac27b4bc510973335bf (diff) | |
COMPMID-3862: Add support for QASYMM8 LEAKY RELU activation
- LEAKY RELU activation is now supported for the QASYMM8 data type (a usage sketch follows the diff below)
- vquantize on the NEON side has been modified to match the other backends (OpenCL and reference); a scalar sketch of the quantization math follows the review trailers below
Change-Id: I194631225c8d4f3cc96027d64812ec2be2b4328a
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4593
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
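The commit message above describes quantized arithmetic. As a rough illustration of what a LEAKY RELU on QASYMM8 has to compute, here is a minimal scalar model: dequantize with the tensor's affine parameters, apply f(x) = x for x >= 0 and alpha * x otherwise, then requantize. This is a sketch, not the library's NEON kernel; the function name and the round-to-nearest choice are assumptions, and agreeing on the rounding across NEON, OpenCL, and the reference is precisely what the vquantize change addresses.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical scalar model of QASYMM8 leaky ReLU (illustration only).
// Affine scheme assumed: real = scale * (quantized - offset).
uint8_t leaky_relu_qasymm8(uint8_t in, float scale, int32_t offset, float alpha)
{
    const float x = scale * (static_cast<float>(in) - static_cast<float>(offset)); // dequantize
    const float y = (x >= 0.f) ? x : alpha * x;                                    // leaky ReLU
    // Requantize with round-to-nearest (assumed); clamp into the QASYMM8 range.
    const int32_t q = static_cast<int32_t>(std::lround(y / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}
```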
Diffstat (limited to 'tests')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | tests/validation/CL/ActivationLayer.cpp | 6 |
| -rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 18 |

2 files changed, 14 insertions, 10 deletions
```diff
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 9b725a44e7..fa95594157 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -168,8 +168,10 @@
 template <typename T>
 using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
 const auto QuantizedActivationDataset8 = combine(combine(framework::dataset::make("InPlace", { false }),
-                                                         concat(datasets::ActivationFunctionsQuantized(), framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH))),
-                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+                                                         concat(datasets::ActivationFunctionsQuantized(),
+                                                                framework::dataset::make("ActivationFunction",
+{ ActivationLayerInfo::ActivationFunction::HARD_SWISH, ActivationLayerInfo::ActivationFunction::LEAKY_RELU }))),
+                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
 
 const auto QuantizedActivationDataset16 = combine(combine(framework::dataset::make("InPlace", { false }),
                                                           datasets::ActivationFunctionsQuantized()),
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 0ef4590d7e..84ff288b2f 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -48,8 +48,7 @@
 namespace validation
 {
 namespace
 {
 RelativeTolerance<float> tolerance_float_sqrt(0.0001f);
-
-
+
 /** Define relative tolerance of the activation layer.
  *
  * @param[in] data_type The data type used.
@@ -234,12 +233,15 @@
 template <typename T>
 using NEActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<Tensor, Accessor, NEActivationLayer, T>;
 
 /** Input data sets. */
-const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
-                                                                                                  ActivationLayerInfo::ActivationFunction::RELU,
-                                                                                                  ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
-                                                                                                  ActivationLayerInfo::ActivationFunction::LOGISTIC,
-                                                                                                  ActivationLayerInfo::ActivationFunction::TANH
-                                                                                                });
+const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction",
+{
+    ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
+    ActivationLayerInfo::ActivationFunction::RELU,
+    ActivationLayerInfo::ActivationFunction::BOUNDED_RELU,
+    ActivationLayerInfo::ActivationFunction::LOGISTIC,
+    ActivationLayerInfo::ActivationFunction::TANH,
+    ActivationLayerInfo::ActivationFunction::LEAKY_RELU,
+});
 
 const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }),
                                                 concat(QuantizedActivationFunctionsDataset, framework::dataset::make("ActivationFunction", ActivationLayerInfo::ActivationFunction::HARD_SWISH))),
```
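For orientation, here is a minimal usage sketch (not part of this patch) of what the new test cases exercise: running LEAKY RELU on QASYMM8 tensors through NEActivationLayer. The shape, scale, offset, and alpha values are illustrative only.

```cpp
#include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor src, dst;
    const TensorShape      shape(16U, 16U);
    const QuantizationInfo qinfo(0.1f, 128); // example scale and zero point

    src.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));
    dst.allocator()->init(TensorInfo(shape, 1, DataType::QASYMM8, qinfo));

    // LEAKY_RELU takes the negative-slope alpha as parameter "a".
    NEActivationLayer act;
    act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LEAKY_RELU, 0.1f));

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src with quantized input data ...
    act.run();
    return 0;
}
```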