From 30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Wed, 26 Jun 2019 16:23:03 +0100
Subject: COMPMID-2411: Add (logistic and tanh) activation support for QSYMM16 for CL

Change-Id: I8d72490b1cc58563ba7b94664135586bc40e6526
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/1466
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
---
 tests/validation/CL/ActivationLayer.cpp            | 31 +++++++++++++++++++++-
 tests/validation/fixtures/ActivationLayerFixture.h |  4 +--
 2 files changed, 32 insertions(+), 3 deletions(-)

(limited to 'tests/validation')

diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 45c2e0e683..fd203ccb7e 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -43,6 +43,8 @@ namespace validation
 {
 namespace
 {
+constexpr AbsoluteTolerance<float> tolerance_qsymm16(1.f);
+
 /** Define tolerance of the activation layer.
  *
  * @param[in] activation The activation function used.
@@ -139,6 +141,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid quantization info
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching shapes
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), // Invalid activation function for QSYMM16
                                                      }),
        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
@@ -146,6 +151,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                                TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
                                                TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+                                               TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
+                                               TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
+                                               TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                              })),
        framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -153,8 +161,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                     ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
+                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
+                                                    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
                                                   })),
-       framework::dataset::make("Expected", { false, false, true, true, false, false })),
+       framework::dataset::make("Expected", { false, false, true, true, false, false, true, true, false })),
        input_info, output_info, act_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
@@ -228,6 +239,24 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<uint8_t>, fra
     validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
 }
 TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+                       framework::dataset::make("DataType",
+                                                DataType::QSYMM16)),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
+                       framework::dataset::make("DataType",
+                                                DataType::QSYMM16)),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
 TEST_SUITE_END() // Quantized
 
 TEST_SUITE_END() // ActivationLayer

diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 4aaf8e7ce3..d9f26b7368 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -73,7 +73,7 @@ protected:
             std::uniform_real_distribution<> distribution(min_bound, max_bound);
             library->fill(tensor, distribution, 0);
         }
-        else if(is_data_type_quantized_asymmetric(tensor.data_type()) || (is_data_type_quantized_symmetric(tensor.data_type())))
+        else if(is_data_type_quantized(tensor.data_type()))
         {
             library->fill_tensor_uniform(tensor, 0);
         }
@@ -96,7 +96,7 @@ protected:
         // Create and configure function
         FunctionType act_layer;
 
-        TensorType *dst_ptr = _in_place ? &src : &dst;
+        TensorType *dst_ptr = _in_place ? nullptr : &dst;
 
         act_layer.configure(&src, dst_ptr, info);
-- 
cgit v1.2.1
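
A note on the quantization parameters used above: QSYMM16 is 16-bit signed
symmetric quantization with no offset, so a real value is recovered as
q * scale. With scale = 1.f / 32768.f the representable range is
[-1, 32767/32768], which covers the saturating outputs of tanh in (-1, 1)
and logistic in (0, 1), and tolerance_qsymm16(1.f) permits a one-LSB
mismatch between the CL result and the reference, i.e. a dequantized error
of about 3e-5. The sketch below illustrates the scheme as standalone C++
with made-up helper names; it is not the Compute Library implementation.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Symmetric 16-bit quantization: no zero offset, so dequantized(q) = q * scale.
constexpr float kScale = 1.f / 32768.f;

// Quantize with round-to-nearest and saturation to the int16_t range.
int16_t quantize_qsymm16(float value)
{
    const long rounded = std::lround(value / kScale);
    return static_cast<int16_t>(std::min(std::max(rounded, -32768L), 32767L));
}

float dequantize_qsymm16(int16_t value)
{
    return static_cast<float>(value) * kScale;
}

int main()
{
    // tanh(0.5) ~= 0.46212; a quantize/dequantize round trip lands within
    // kScale/2 of the real value, well inside the one-LSB test tolerance.
    const int16_t q = quantize_qsymm16(std::tanh(0.5f));
    std::cout << dequantize_qsymm16(q) << '\n';
    return 0;
}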
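
The three QSYMM16 rows added to the Validate test encode the support matrix
this patch introduces: TANH and LOGISTIC are expected to pass
CLActivationLayer::validate() on QSYMM16 tensors, while SQRT is expected to
be rejected. A minimal sketch of that gate, using illustrative names that
are not Compute Library API:

#include <cassert>

enum class ActFunc { RELU, TANH, LOGISTIC, SQRT };

// Hypothetical helper: on QSYMM16, only the two saturating activations
// added by this patch validate successfully.
bool qsymm16_supports(ActFunc f)
{
    return f == ActFunc::TANH || f == ActFunc::LOGISTIC;
}

int main()
{
    assert(qsymm16_supports(ActFunc::TANH));     // Expected: true
    assert(qsymm16_supports(ActFunc::LOGISTIC)); // Expected: true
    assert(!qsymm16_supports(ActFunc::SQRT));    // Expected: false
    return 0;
}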