From 3463a8b9eed57340366743340d2d06df3aa1ae88 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 23 Aug 2018 13:11:53 +0100
Subject: COMPMID-1534: Fix NEActivationLayer for FP16

Simulates Logistic, Tanh and SoftRelu in FP32

Change-Id: I9950f7636b8ff2f3e054937e5ef414e45dfe06f5
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/145357
Tested-by: Jenkins
Reviewed-by: Anthony Barbier
---
 tests/validation/Helpers.h                |  5 ++--
 tests/validation/NEON/ActivationLayer.cpp | 46 +++++++++++++++++++++++++------
 2 files changed, 40 insertions(+), 11 deletions(-)

(limited to 'tests/validation')

diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 814d1f5ed0..e5ba14849d 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -70,15 +70,16 @@ std::pair<T, T> get_activation_layer_test_bounds(ActivationLayerInfo::Activation
     switch(activation)
     {
+        case ActivationLayerInfo::ActivationFunction::TANH:
         case ActivationLayerInfo::ActivationFunction::SQUARE:
         case ActivationLayerInfo::ActivationFunction::LOGISTIC:
         case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
             // Reduce range as exponent overflows
-            bounds = std::make_pair(-10._h, 10._h);
+            bounds = std::make_pair(-2._h, 2._h);
             break;
         case ActivationLayerInfo::ActivationFunction::SQRT:
             // Reduce range as sqrt should take a non-negative number
-            bounds = std::make_pair(0._h, 255._h);
+            bounds = std::make_pair(0._h, 128._h);
             break;
         default:
             bounds = std::make_pair(-255._h, 255._h);
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index dee264c6b8..5b16b0621c 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -43,14 +43,42 @@ namespace validation
 {
 namespace
 {
-/** Define tolerance of the activation layer.
+/** Define relative tolerance of the activation layer.
  *
  * @param[in] data_type  The data type used.
  * @param[in] activation The activation function used.
  *
- * @return Tolerance depending on the activation function.
+ * @return Relative tolerance depending on the activation function.
  */
-AbsoluteTolerance<float> tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
+RelativeTolerance<float> relative_tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
+{
+    switch(activation)
+    {
+        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+        case ActivationLayerInfo::ActivationFunction::SQRT:
+        case ActivationLayerInfo::ActivationFunction::TANH:
+            switch(data_type)
+            {
+                case DataType::F16:
+                    return RelativeTolerance<float>(0.1f);
+                default:
+                    return RelativeTolerance<float>(0.05f);
+            }
+            break;
+        default:
+            return RelativeTolerance<float>(0.f);
+    }
+}
+
+/** Define absolute tolerance of the activation layer.
+ *
+ * @param[in] data_type  The data type used.
+ * @param[in] activation The activation function used.
+ *
+ * @return Absolute tolerance depending on the activation function.
+ */
+AbsoluteTolerance<float> absolute_tolerance(DataType data_type, ActivationLayerInfo::ActivationFunction activation)
 {
     switch(activation)
     {
@@ -163,14 +191,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<half>, framework::Data
                                                                                                  DataType::F16)))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance(_data_type, _function));
+    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
                                                                                                            framework::dataset::make("DataType",
                                                                                                                                     DataType::F16)))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance(_data_type, _function));
+    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
 TEST_SUITE_END()
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
@@ -181,13 +209,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerFixture<float>, framework::Dat
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance(_data_type, _function));
+    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), ActivationDataset),
                                                                                                             framework::dataset::make("DataType",
                                                                                                                                      DataType::F32)))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance(_data_type, _function));
+    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
 TEST_SUITE_END()
 TEST_SUITE_END()
@@ -211,7 +239,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<uint8_t>, fra
                                                                                                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance(_data_type, _function));
+    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
                                                                                                                       framework::dataset::make("DataType",
@@ -219,7 +247,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, fra
                                                                                                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance(_data_type, _function));
+    validate(Accessor(_target), _reference, relative_tolerance(_data_type, _function), 0.f, absolute_tolerance(_data_type, _function));
 }
 TEST_SUITE_END()
 TEST_SUITE_END()
--
cgit v1.2.1
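Note: the commit message states that Logistic, Tanh and SoftRelu are simulated in FP32; the kernel change itself lies outside this filtered view, which only shows tests/validation. As a rough illustration of that idea only — a minimal scalar sketch, not the library's NEON kernel, with activate_f16_via_f32, Act and the half_t alias all being hypothetical names — the computation widens the half-precision input to FP32, evaluates the exp-based activation there, and narrows the result back:

// Minimal scalar sketch (NOT the library's NEON kernel): evaluate exp-based
// activations in FP32 so the intermediate exponential cannot overflow or lose
// precision in half-precision arithmetic, then narrow the result back to FP16.
#include <cmath>

// Assumption: the compiler provides _Float16 (GCC/Clang on AArch64 do);
// the real code uses the library's own half type.
using half_t = _Float16;

enum class Act
{
    LOGISTIC,
    TANH,
    SOFT_RELU
};

half_t activate_f16_via_f32(half_t x, Act f, float a = 1.f, float b = 1.f)
{
    const float xf = static_cast<float>(x); // widen to FP32
    float       y  = 0.f;
    switch(f)
    {
        case Act::LOGISTIC:
            y = 1.f / (1.f + std::exp(-xf));
            break;
        case Act::TANH:
            y = a * std::tanh(b * xf); // a * tanh(b * x)
            break;
        case Act::SOFT_RELU:
            y = std::log1p(std::exp(xf)); // log(1 + e^x)
            break;
    }
    return static_cast<half_t>(y); // narrow back to FP16
}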
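The updated validate(...) calls pass a relative tolerance, an allowed mismatch ratio of 0.f, and an absolute tolerance. As an illustration only — within_tolerance is a hypothetical helper, not the framework's actual comparison code — a per-element check combining the two tolerances might accept a value that satisfies either bound:

#include <cmath>

// Hypothetical per-element check (placeholder, not the framework's internals):
// a target value is accepted if it matches the reference within the absolute
// tolerance, or failing that, within the relative tolerance.
bool within_tolerance(float target, float reference, float rel_tol, float abs_tol)
{
    const float err = std::fabs(target - reference);
    if(err <= abs_tol)
    {
        return true; // close enough in absolute terms
    }
    const float denom = std::fabs(reference);
    return (denom != 0.f) && (err / denom <= rel_tol); // relative comparison
}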