author     Sang-Hoon Park <sang-hoon.park@arm.com>          2019-11-27 15:26:44 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>   2019-12-20 15:50:35 +0000
commit     fb6aaeb3b02ad3210d57c1d945998f93c11de474 (patch)
tree       9a99acaa6f36f28066b2d7b1b32809b980f88f2d /tests
parent     f29d1b7d8bf2d1619554eb3443556b44d4aa1a4c (diff)
download   ComputeLibrary-fb6aaeb3b02ad3210d57c1d945998f93c11de474.tar.gz
COMPMID-2773 [NE] add support for QASYMM8_SIGNED to QuantizationLayer
Change-Id: Ib692a79228fd85ee600c212d77439ca38d71f332
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2377
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/NEON/QuantizationLayer.cpp       | 18
-rw-r--r--  tests/validation/reference/QuantizationLayer.cpp  | 27
2 files changed, 35 insertions(+), 10 deletions(-)
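For reference, the asymmetric signed quantization rule that the new QASYMM8_SIGNED cases exercise maps a float to int8 via value / scale + offset, clamped to [-128, 127]. The sketch below is illustrative only (the helper name is made up for this page); the library's own quantize_qasymm8_signed helper remains the authoritative implementation.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Illustrative stand-in (hypothetical name) for the library's
    // quantize_qasymm8_signed, using round-to-nearest-even as on aarch64.
    int8_t quantize_signed_sketch(float value, float scale, int32_t offset)
    {
        const float scaled  = value / scale + static_cast<float>(offset);
        const int   rounded = static_cast<int>(std::nearbyint(scaled)); // ties to even under FE_TONEAREST
        return static_cast<int8_t>(std::min(127, std::max(-128, rounded)));
    }

    int main()
    {
        // QuantizationInfo(0.5f, 10) as in the new tests: scale 0.5, offset 10.
        std::cout << int(quantize_signed_sketch(1.0f, 0.5f, 10)) << '\n';   // 12
        std::cout << int(quantize_signed_sketch(-70.0f, 0.5f, 10)) << '\n'; // -128 (clamped)
    }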
diff --git a/tests/validation/NEON/QuantizationLayer.cpp b/tests/validation/NEON/QuantizationLayer.cpp
index 49118f7dc5..a4af2a2886 100644
--- a/tests/validation/NEON/QuantizationLayer.cpp
+++ b/tests/validation/NEON/QuantizationLayer.cpp
@@ -100,6 +100,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(QuantizationS
template <typename T>
using NEQuantizationLayerQASYMM8Fixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, uint8_t>;
template <typename T>
+using NEQuantizationLayerQASYMM8SignedFixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, int8_t>;
+template <typename T>
using NEQuantizationLayerQASYMM16Fixture = QuantizationValidationFixture<Tensor, Accessor, NEQuantizationLayer, T, uint16_t>;
TEST_SUITE(Float)
@@ -112,6 +114,14 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<float>
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8Signed, NEQuantizationLayerQASYMM8SignedFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
@@ -147,6 +157,14 @@ FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8, NEQuantizationLayerQASYMM8Fixture<half>,
// Validate output
validate(Accessor(_target), _reference, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunSmallQASYMM8Signed, NEQuantizationLayerQASYMM8SignedFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(QuantizationSmallShapes,
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataTypeOut", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_u8);
+}
FIXTURE_DATA_TEST_CASE(RunSmallQASYMM16, NEQuantizationLayerQASYMM16Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(QuantizationSmallShapes,
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataTypeOut", { DataType::QASYMM16 })),
diff --git a/tests/validation/reference/QuantizationLayer.cpp b/tests/validation/reference/QuantizationLayer.cpp
index ae23f7ec27..35d44ffa49 100644
--- a/tests/validation/reference/QuantizationLayer.cpp
+++ b/tests/validation/reference/QuantizationLayer.cpp
@@ -40,26 +40,31 @@ SimpleTensor<Tout> quantization_layer(const SimpleTensor<Tin> &src, DataType out
SimpleTensor<Tout> dst{ src.shape(), output_data_type, 1, quantization_info };
const UniformQuantizationInfo qinfo = quantization_info.uniform();
+
+#ifdef __aarch64__
+ constexpr auto rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
+#else // __aarch64__
+ constexpr auto rounding_policy = RoundingPolicy::TO_ZERO;
+#endif // __aarch64__
+
switch(output_data_type)
{
case DataType::QASYMM8:
for(int i = 0; i < src.num_elements(); ++i)
{
-#ifdef __aarch64__
- dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
-#else // __aarch64__
- dst[i] = quantize_qasymm8((src[i]), qinfo, RoundingPolicy::TO_ZERO);
-#endif // __aarch64__
+ dst[i] = quantize_qasymm8((src[i]), qinfo, rounding_policy);
+ }
+ break;
+ case DataType::QASYMM8_SIGNED:
+ for(int i = 0; i < src.num_elements(); ++i)
+ {
+ dst[i] = quantize_qasymm8_signed((src[i]), qinfo, rounding_policy);
}
break;
case DataType::QASYMM16:
for(int i = 0; i < src.num_elements(); ++i)
{
-#ifdef __aarch64__
- dst[i] = quantize_qasymm16((src[i]), qinfo, RoundingPolicy::TO_NEAREST_EVEN);
-#else // __aarch64__
- dst[i] = quantize_qasymm16((src[i]), qinfo, RoundingPolicy::TO_ZERO);
-#endif // __aarch64__
+ dst[i] = quantize_qasymm16((src[i]), qinfo, rounding_policy);
}
break;
default:
@@ -72,6 +77,8 @@ template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<half> &src,
template SimpleTensor<uint8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
template SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
template SimpleTensor<uint16_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
+template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<half> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
+template SimpleTensor<int8_t> quantization_layer(const SimpleTensor<float> &src, DataType output_data_type, const QuantizationInfo &quantization_info);
} // namespace reference
} // namespace validation
} // namespace test
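The reference change also hoists the per-architecture rounding choice into a single rounding_policy constant (TO_NEAREST_EVEN on aarch64, TO_ZERO elsewhere) so the new QASYMM8_SIGNED case shares it with the existing ones. The snippet below only illustrates how the two roundings differ on an already-scaled value, using plain <cmath> calls rather than the library's RoundingPolicy machinery.

    #include <cmath>
    #include <iostream>

    int main()
    {
        // Two already-scaled values (value / scale + offset) near a rounding boundary.
        const float a = 12.7f;
        const float b = 13.5f;

        // TO_NEAREST_EVEN (aarch64 reference path): ties round to the even integer.
        std::cout << std::nearbyint(a) << ' ' << std::nearbyint(b) << '\n'; // 13 14
        // TO_ZERO (non-aarch64 reference path): the fractional part is dropped.
        std::cout << std::trunc(a) << ' ' << std::trunc(b) << '\n';         // 12 13
    }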