author     Sang-Hoon Park <sang-hoon.park@arm.com>    2019-12-04 09:46:28 +0000
committer  Michele Di Giorgio <michele.digiorgio@arm.com>    2019-12-20 09:56:36 +0000
commit     d817647a4fabc8eccd0e64f54465e378a4239b32 (patch)
tree       361ba03c78d83974cf7396510c85feb6dd3159e6 /tests
parent     e7be8a072967f9ae547468a7625e11477ea32221 (diff)
download   ComputeLibrary-d817647a4fabc8eccd0e64f54465e378a4239b32.tar.gz
COMPMID-2767 [NEON] add support for QASYMM8_SIGNED to DequantizationLayer
Change-Id: If5b21d1e656b21baf39346c2fd74e8edc75007f5
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2429
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/AssetsLibrary.h                                     1
-rw-r--r--  tests/validation/NEON/DequantizationLayer.cpp            26
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h    3
-rw-r--r--  tests/validation/reference/DequantizationLayer.cpp       21
4 files changed, 40 insertions(+), 11 deletions(-)
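
For context on the change itself: QASYMM8_SIGNED stores each value as a signed 8-bit integer with an asymmetric (scale, offset) quantization, so dequantization maps a stored value q back to scale * (q - offset). The snippet below is a minimal, self-contained sketch of that mapping with a hypothetical helper name and a plain-float interface; it is not the library's API, which routes this through QuantizationInfo.

#include <cstdint>

// Minimal sketch of asymmetric signed dequantization:
//   real_value = scale * (quantized_value - offset)
// Hypothetical helper for illustration only, not the library API.
static inline float dequantize_qasymm8_signed_sketch(int8_t q, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
}

// Example: with scale = 0.5f and offset = -10, q = -6 dequantizes to 0.5f * (-6 - (-10)) = 2.0f.
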
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 29d9cc6d49..c4892748f4 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -846,6 +846,7 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
}
case DataType::S8:
case DataType::QSYMM8:
+ case DataType::QASYMM8_SIGNED:
{
ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
std::uniform_int_distribution<int8_t> distribution_s8(low, high);
diff --git a/tests/validation/NEON/DequantizationLayer.cpp b/tests/validation/NEON/DequantizationLayer.cpp
index 0dce76a933..4389419d73 100644
--- a/tests/validation/NEON/DequantizationLayer.cpp
+++ b/tests/validation/NEON/DequantizationLayer.cpp
@@ -55,6 +55,14 @@ const auto dataset_quant_f32 = combine(combine(combine(datasets::SmallShapes(),
const auto dataset_quant_f16 = combine(combine(combine(datasets::SmallShapes(), datasets::QuantizedTypes()),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataLayout", { DataLayout::NCHW }));
+const auto dataset_quant_asymm_signed_f32 = combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("QuantizedTypes", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW }));
+const auto dataset_quant_asymm_signed_f16 = combine(combine(combine(datasets::SmallShapes(),
+ framework::dataset::make("QuantizedTypes", { DataType::QASYMM8_SIGNED })),
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW }));
const auto dataset_quant_per_channel_f32 = combine(combine(combine(datasets::SmallShapes(), datasets::QuantizedPerChannelTypes()),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }));
@@ -73,6 +81,12 @@ const auto dataset_quant_per_channel_nightly_f32 = combine(combine(combine(datas
const auto dataset_quant_per_channel_nightly_f16 = combine(combine(combine(datasets::LargeShapes(), datasets::QuantizedPerChannelTypes()),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }));
+
+const auto dataset_precommit_f16 = concat(concat(dataset_quant_f16, dataset_quant_per_channel_f16), dataset_quant_asymm_signed_f16);
+const auto dataset_precommit_f32 = concat(concat(dataset_quant_f32, dataset_quant_per_channel_f32), dataset_quant_asymm_signed_f32);
+const auto dataset_nightly_f16 = concat(dataset_quant_f16, dataset_quant_per_channel_f16);
+const auto dataset_nightly_f32 = concat(dataset_quant_f32, dataset_quant_per_channel_f32);
+
} // namespace
TEST_SUITE(NEON)
@@ -86,14 +100,16 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
TensorInfo(TensorShape(16U, 16U, 2U, 5U), 1, DataType::QASYMM8), // Missmatching shapes
TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Valid
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8), // Valid
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::QASYMM8_SIGNED), // Valid
}),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::U8),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(17U, 16U, 16U, 5U), 1, DataType::F32),
TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 16U, 16U, 5U), 1, DataType::F32),
})),
- framework::dataset::make("Expected", { false, false, false, true, true})),
+ framework::dataset::make("Expected", { false, false, false, true, true, true })),
input_info, output_info, expected)
{
ARM_COMPUTE_EXPECT(bool(NEDequantizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -132,12 +148,12 @@ using NEDequantizationLayerFixture = DequantizationValidationFixture<Tensor, Acc
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, concat(dataset_quant_f16, dataset_quant_per_channel_f16))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, dataset_precommit_f16)
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, concat(dataset_quant_nightly_f16, dataset_quant_per_channel_nightly_f16))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<half>, framework::DatasetMode::NIGHTLY, dataset_nightly_f16)
{
// Validate output
validate(Accessor(_target), _reference);
@@ -146,12 +162,12 @@ TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, concat(dataset_quant_f32, dataset_quant_per_channel_f32))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, dataset_precommit_f32)
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, concat(dataset_quant_nightly_f32, dataset_quant_per_channel_nightly_f32))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, dataset_nightly_f32)
{
// Validate output
validate(Accessor(_target), _reference);
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index f44f8658c2..3699613a39 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -106,6 +106,7 @@ protected:
fill(src);
return reference::dequantization_layer<T>(src);
}
+ case DataType::QASYMM8_SIGNED:
case DataType::QSYMM8_PER_CHANNEL:
case DataType::QSYMM8:
{
@@ -149,6 +150,8 @@ protected:
}
case DataType::QASYMM8:
return QuantizationInfo(1.f / distribution_scale_q8(gen), distribution_offset_q8(gen));
+ case DataType::QASYMM8_SIGNED:
+ return QuantizationInfo(1.f / distribution_scale_q8(gen), -distribution_offset_q8(gen));
default:
ARM_COMPUTE_ERROR("Unsupported data type");
}
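
A note on the sign flip in the fixture's generated quantization info above: the offsets drawn for QASYMM8 suit an unsigned value range of [0, 255], whereas QASYMM8_SIGNED values live in [-128, 127], so the fixture negates the drawn offset to keep the zero point inside the signed range (this reading assumes distribution_offset_q8 yields non-negative values, as the unsigned case suggests). As a worked example with illustrative numbers, not taken from the test: with scale 0.1f and offset -30, the stored value -30 dequantizes to 0.1f * (-30 - (-30)) = 0.0f, and 70 dequantizes to 0.1f * (70 - (-30)) = 10.0f.
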
diff --git a/tests/validation/reference/DequantizationLayer.cpp b/tests/validation/reference/DequantizationLayer.cpp
index 16f25c4427..7dd36402b3 100644
--- a/tests/validation/reference/DequantizationLayer.cpp
+++ b/tests/validation/reference/DequantizationLayer.cpp
@@ -36,18 +36,27 @@ namespace reference
namespace
{
template <typename TOut>
-TOut dequantize(int8_t val, const UniformQuantizationInfo qinfo)
+TOut dequantize(int8_t val, const UniformQuantizationInfo qinfo, DataType dt)
{
- return static_cast<TOut>(dequantize_qsymm8(val, qinfo));
+ if(dt == DataType::QSYMM8 || dt == DataType::QSYMM8_PER_CHANNEL)
+ {
+ return static_cast<TOut>(dequantize_qsymm8(val, qinfo));
+ }
+ else
+ {
+ return static_cast<TOut>(dequantize_qasymm8_signed(val, qinfo));
+ }
}
template <typename TOut>
-TOut dequantize(uint8_t val, const UniformQuantizationInfo qinfo)
+TOut dequantize(uint8_t val, const UniformQuantizationInfo qinfo, DataType dt)
{
+ ARM_COMPUTE_UNUSED(dt);
return static_cast<TOut>(dequantize_qasymm8(val, qinfo));
}
template <typename TOut>
-TOut dequantize(int16_t val, const UniformQuantizationInfo qinfo)
+TOut dequantize(int16_t val, const UniformQuantizationInfo qinfo, DataType dt)
{
+ ARM_COMPUTE_UNUSED(dt);
return static_cast<TOut>(dequantize_qsymm16(val, qinfo));
}
} // namespace
@@ -77,7 +86,7 @@ SimpleTensor<TOut> dequantization_layer(const SimpleTensor<TIn> &src)
// Dequantize slice
for(int s = 0; s < WH; ++s)
{
- dst[idx + s] = dequantize<TOut>(static_cast<TIn>(src[idx + s]), channel_qinfo);
+ dst[idx + s] = dequantize<TOut>(static_cast<TIn>(src[idx + s]), channel_qinfo, src_data_type);
}
}
}
@@ -89,7 +98,7 @@ SimpleTensor<TOut> dequantization_layer(const SimpleTensor<TIn> &src)
for(int i = 0; i < src.num_elements(); ++i)
{
- dst[i] = static_cast<TOut>(dequantize<TOut>(static_cast<TIn>(src[i]), quantization_info));
+ dst[i] = static_cast<TOut>(dequantize<TOut>(static_cast<TIn>(src[i]), quantization_info, src_data_type));
}
}
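
A note on the reference change: QSYMM8, QSYMM8_PER_CHANNEL and QASYMM8_SIGNED all carry int8_t samples, so the int8_t overload of dequantize can no longer pick the right formula from the argument type alone; the new DataType parameter selects it instead. Symmetric types ignore the offset, while the asymmetric signed type subtracts it. Below is a minimal standalone sketch of that dispatch, with hypothetical names rather than the library's types.

#include <cstdint>

enum class SketchDataType
{
    QSYMM8,
    QSYMM8_PER_CHANNEL,
    QASYMM8_SIGNED
};

// Symmetric int8:         real = scale * q
// Asymmetric signed int8: real = scale * (q - offset)
static float dequantize_int8_sketch(int8_t q, float scale, int32_t offset, SketchDataType dt)
{
    if(dt == SketchDataType::QSYMM8 || dt == SketchDataType::QSYMM8_PER_CHANNEL)
    {
        return scale * static_cast<float>(q);
    }
    return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
}
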