author    Manuel Bottini <manuel.bottini@arm.com>    2019-06-26 16:23:03 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>    2019-07-09 09:24:21 +0000
commit    30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3 (patch)
tree      33e12ced1dca23b79212b6afd64950ed4a40363b /tests
parent    ebdde65530c8819a16d558fc5ebb3cc519fbc344 (diff)
download  ComputeLibrary-30dbeef2f46bdd6fe05d25dfa27cb4b2359dced3.tar.gz
COMPMID-2411: Add (logistic and tanh) activation support for QSYMM16 for CL
Change-Id: I8d72490b1cc58563ba7b94664135586bc40e6526
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1466
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/ActivationLayer.cpp            | 31
-rw-r--r--  tests/validation/fixtures/ActivationLayerFixture.h |  4
2 files changed, 32 insertions(+), 3 deletions(-)
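
For context on the QuantizationInfo(1.f / 32768.f, 0) used throughout the patch: QSYMM16 stores values as signed 16-bit integers with a single per-tensor scale and no zero-point offset, so a scale of 1/32768 covers roughly [-1, 1), which matches the output range of tanh and logistic. A minimal sketch of the mapping, with saturation to the int16 range (the helper names are illustrative, not ComputeLibrary's API):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative QSYMM16 helpers: symmetric quantization with no zero point.
// ComputeLibrary ships its own equivalents; these names exist only for this sketch.
int16_t quantize_qsymm16(float value, float scale)
{
    const long q = std::lround(value / scale);
    return static_cast<int16_t>(std::min(32767L, std::max(-32768L, q)));
}

float dequantize_qsymm16(int16_t value, float scale)
{
    return static_cast<float>(value) * scale;
}
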
diff --git a/tests/validation/CL/ActivationLayer.cpp b/tests/validation/CL/ActivationLayer.cpp
index 45c2e0e683..fd203ccb7e 100644
--- a/tests/validation/CL/ActivationLayer.cpp
+++ b/tests/validation/CL/ActivationLayer.cpp
@@ -43,6 +43,8 @@ namespace validation
{
namespace
{
+constexpr AbsoluteTolerance<float> tolerance_qsymm16(1.f);
+
/** Define tolerance of the activation layer.
*
* @param[in] activation The activation function used.
@@ -139,6 +141,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid quantization info
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), // Invalid activation function for QSYMM16
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
@@ -146,6 +151,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
+ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
})),
framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
@@ -153,8 +161,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
+ ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
})),
- framework::dataset::make("Expected", { false, false, true, true, false, false })),
+ framework::dataset::make("Expected", { false, false, true, true, false, false, true, true, false })),
input_info, output_info, act_info, expected)
{
ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
@@ -228,6 +239,24 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<uint8_t>, fra
validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QSYMM16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QSYMM16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // ActivationLayer
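
The quantized fixture builds its reference by dequantizing the input, applying the float activation, and requantizing; tolerance_qsymm16(1.f) then lets the CL result differ from that reference by one quantization step. A sketch of what the comparison amounts to, reusing the illustrative helpers above (a reading of the test setup, not the framework's actual code path):

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// From the sketch above.
int16_t quantize_qsymm16(float value, float scale);
float dequantize_qsymm16(int16_t value, float scale);

// Reference tanh over a QSYMM16 buffer with the test's scale of 1/32768:
// dequantize, apply the float function, requantize with the same scale.
void reference_tanh_qsymm16(const int16_t *src, int16_t *dst, std::size_t count, float scale)
{
    for(std::size_t i = 0; i < count; ++i)
    {
        dst[i] = quantize_qsymm16(std::tanh(dequantize_qsymm16(src[i], scale)), scale);
    }
}

// With tolerance_qsymm16(1.f), target and reference may differ by one step.
bool within_tolerance(int16_t reference, int16_t target)
{
    return std::abs(static_cast<int>(reference) - static_cast<int>(target)) <= 1;
}
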
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 4aaf8e7ce3..d9f26b7368 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -73,7 +73,7 @@ protected:
std::uniform_real_distribution<> distribution(min_bound, max_bound);
library->fill(tensor, distribution, 0);
}
- else if(is_data_type_quantized_asymmetric(tensor.data_type()) || (is_data_type_quantized_symmetric(tensor.data_type())))
+ else if(is_data_type_quantized(tensor.data_type()))
{
library->fill_tensor_uniform(tensor, 0);
}
@@ -96,7 +96,7 @@ protected:
// Create and configure function
FunctionType act_layer;
- TensorType *dst_ptr = _in_place ? &src : &dst;
+ TensorType *dst_ptr = _in_place ? nullptr : &dst;
act_layer.configure(&src, dst_ptr, info);
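
A note on the fixture fix in the last hunk: for ComputeLibrary functions that support in-place execution, passing a null output pointer to configure() is what requests the in-place path, so using &src as a separate destination tensor did not exercise it. A hedged usage sketch, assuming the CLActivationLayer API of this era (tensor allocation and CL scheduler setup elided):

#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

using namespace arm_compute;

// Sketch: a null destination selects in-place execution, which is why the
// fixture now passes nullptr rather than &src when _in_place is set.
void configure_activation(CLTensor &src, CLTensor &dst, bool in_place)
{
    CLActivationLayer act;
    const ActivationLayerInfo info(ActivationLayerInfo::ActivationFunction::LOGISTIC);
    act.configure(&src, in_place ? nullptr : &dst, info);
}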