author    giuros01 <giuseppe.rossini@arm.com>      2019-06-20 10:30:17 +0100
committer Manuel Bottini <manuel.bottini@arm.com>  2019-07-01 10:20:51 +0000
commit    c9573f35c4267aa55648df4a134ebec82c5af93b (patch)
tree      aa17eabf8c1ecec995d22aa8a3479b9f15becaf7 /tests
parent    5a1320b923ec4db1cde00f9fc1be590022178b7f (diff)
download  ComputeLibrary-c9573f35c4267aa55648df4a134ebec82c5af93b.tar.gz
COMPMID-2407: Add (logistic and tanh) activation support for QSYMM16 for NEON
Change-Id: Ib89c9cfe12975e51d1710af736c73ce79e667363
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1412
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/NEON/ActivationLayer.cpp          | 29
-rw-r--r--  tests/validation/fixtures/ActivationLayerFixture.h | 39
-rw-r--r--  tests/validation/reference/ActivationLayer.cpp     | 11
3 files changed, 71 insertions(+), 8 deletions(-)
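
Note on the quantization scheme used throughout this patch: QSYMM16 stores each value as a signed 16-bit integer with the zero point fixed at 0, so real = scale * q. Below is a minimal standalone sketch of that mapping with hypothetical helper names; the library's own conversion lives behind QuantizationInfo and is not reproduced here:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical helpers illustrating symmetric 16-bit quantization:
// the zero point is always 0, so the mapping is real = scale * q.
inline float dequantize_qsymm16(int16_t q, float scale)
{
    return scale * static_cast<float>(q);
}

inline int16_t quantize_qsymm16(float value, float scale)
{
    const long q = std::lround(value / scale); // round to nearest
    return static_cast<int16_t>(std::min(32767L, std::max(-32768L, q)));
}

With the scale 1.f / 32768.f used by the new tests, the representable range is [-1.0, 32767/32768 ≈ 0.999969], which covers tanh's [-1, 1] and logistic's [0, 1] outputs up to one quantization step.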
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index a5030b94b7..1174a055c7 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -104,6 +104,8 @@ constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
#endif // defined(__aarch64__)
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
+
/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
@@ -233,7 +235,6 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ
ActivationLayerInfo::ActivationFunction::LOGISTIC,
ActivationLayerInfo::ActivationFunction::TANH
});
-
const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), QuantizedActivationFunctionsDataset),
framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
@@ -256,6 +257,32 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<uint8_t>, fra
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8
+
+/** Input data sets. */
+const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LOGISTIC,
+ ActivationLayerInfo::ActivationFunction::TANH
+ });
+const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), Int16QuantizedActivationFunctionsDataset),
+ framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QSYMM16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), Int16QuantizedActivationDataset),
+ framework::dataset::make("DataType",
+ DataType::QSYMM16)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // ActivationLayer
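
For a worked example of what tolerance_qsymm16(1) permits under the test's QuantizationInfo(1.f / 32768.f, 0.f) (illustrative arithmetic only, not library code):

#include <cassert>
#include <cmath>

int main()
{
    const float scale = 1.f / 32768.f;          // scale from the tests above
    const float x     = std::tanh(0.5f);        // ≈ 0.462117
    const long  q     = std::lround(x / scale); // quantizes to 15143
    const float back  = q * scale;              // ≈ 0.462128
    assert(std::fabs(back - x) <= scale);       // round trip stays within 1 LSB
    return 0;
}

tolerance_qsymm16(1) therefore allows the NEON kernel and the float reference to differ by at most one quantization step after rounding.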
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 464382a1ec..4aaf8e7ce3 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -52,11 +52,11 @@ public:
ActivationLayerInfo info(function, alpha_beta, alpha_beta);
_in_place = in_place;
- _output_quantization_info = calculate_output_quantization_info(info, quantization_info);
- _input_quantization_info = in_place ? _output_quantization_info : quantization_info;
_data_type = data_type;
- _function = function;
+ _output_quantization_info = calculate_output_quantization_info(_data_type, info, quantization_info);
+ _input_quantization_info = in_place ? _output_quantization_info : quantization_info;
+ _function = function;
_target = compute_target(shape, info);
_reference = compute_reference(shape, info);
}
@@ -73,7 +73,7 @@ protected:
std::uniform_real_distribution<> distribution(min_bound, max_bound);
library->fill(tensor, distribution, 0);
}
- else if(is_data_type_quantized_asymmetric(tensor.data_type()))
+ else if(is_data_type_quantized_asymmetric(tensor.data_type()) || (is_data_type_quantized_symmetric(tensor.data_type())))
{
library->fill_tensor_uniform(tensor, 0);
}
@@ -141,14 +141,39 @@ protected:
}
private:
- QuantizationInfo calculate_output_quantization_info(const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
+ QuantizationInfo calculate_output_quantization_info(DataType dt, const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
{
+ auto qasymm8_max = float(std::numeric_limits<uint8_t>::max()) + 1.f;
+ auto qsymm16_max = float(std::numeric_limits<int16_t>::max()) + 1.f;
+
switch(act_info.activation())
{
case ActivationLayerInfo::ActivationFunction::TANH:
- return QuantizationInfo(1.f / 128.f, 128);
+ if(dt == DataType::QSYMM16)
+ {
+ return QuantizationInfo(1.f / qsymm16_max, 0);
+ }
+ else if(dt == DataType::QASYMM8)
+ {
+ return QuantizationInfo(1.f / (0.5 * qasymm8_max), int(0.5 * qasymm8_max));
+ }
+ else
+ {
+ return default_qinfo;
+ }
case ActivationLayerInfo::ActivationFunction::LOGISTIC:
- return QuantizationInfo(1.f / 256.f, 0);
+ if(dt == DataType::QSYMM16)
+ {
+ return QuantizationInfo(1.f / qsymm16_max, 0);
+ }
+ else if(dt == DataType::QASYMM8)
+ {
+ return QuantizationInfo(1.f / qasymm8_max, 0);
+ }
+ else
+ {
+ return default_qinfo;
+ }
default:
return default_qinfo;
}
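
For context on why the fixture picks these parameters: tanh outputs lie in [-1, 1] and logistic in [0, 1], so unsigned QASYMM8 needs a nonzero offset for tanh while signed QSYMM16 does not. A quick sketch of the resulting ranges, derived from the code above with hypothetical names:

// Output ranges implied by calculate_output_quantization_info() above:
//   QASYMM8  tanh:     real = (q - 128) / 128.f -> [-1.0, 0.9921875]  for q in [0, 255]
//   QASYMM8  logistic: real =  q / 256.f        -> [ 0.0, 0.99609375] for q in [0, 255]
//   QSYMM16  both:     real =  q / 32768.f      -> [-1.0, 0.999969]   for q in [-32768, 32767]
constexpr float qasymm8_tanh_max     = (255 - 128) * (1.f / 128.f); // 0.9921875
constexpr float qasymm8_logistic_max = 255 * (1.f / 256.f);         // 0.99609375
constexpr float qsymm16_max          = 32767 * (1.f / 32768.f);     // ~0.999969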
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index f5e98aa7e8..f573d12df8 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -65,6 +65,17 @@ SimpleTensor<uint8_t> activation_layer<uint8_t>(const SimpleTensor<uint8_t> &src
return dst;
}
+template <>
+SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+{
+ const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
+
+ SimpleTensor<float> src_tmp = convert_from_symmetric(src);
+ SimpleTensor<float> dst_tmp = activation_layer<float>(src_tmp, info);
+ SimpleTensor<int16_t> dst = convert_to_symmetric<int16_t>(dst_tmp, dst_qinfo);
+ return dst;
+}
+
template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
} // namespace reference
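
The int16_t specialization mirrors the existing uint8_t one: dequantize, run the float reference, requantize with dst_qinfo. A sketch of that flow under assumed semantics for convert_from_symmetric/convert_to_symmetric (namely real = scale * q and q = clamp(round(real / scale))); this is not the library's implementation:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical reference flow: dequantize -> float activation -> requantize.
std::vector<int16_t> activation_qsymm16_sketch(const std::vector<int16_t> &src,
                                               float in_scale, float out_scale,
                                               float (*act)(float))
{
    std::vector<int16_t> dst(src.size());
    for(std::size_t i = 0; i < src.size(); ++i)
    {
        const float f = act(in_scale * static_cast<float>(src[i]));
        const long  q = std::lround(f / out_scale);
        dst[i] = static_cast<int16_t>(std::min(32767L, std::max(-32768L, q)));
    }
    return dst;
}

For tanh, act can be a capture-less lambda such as [](float x) { return std::tanh(x); }.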