author    Michalis Spyrou <michalis.spyrou@arm.com>    2019-08-22 11:44:04 +0100
committer Michalis Spyrou <michalis.spyrou@arm.com>    2019-08-23 13:13:08 +0000
commit    29a01c90fc372d31188ab7157b45b32ce24fa9b3 (patch)
tree      419b7abc22c56fde8dece4c80c328a209c041d94 /tests
parent    fb0fdcdaec57e6f8e1b96f924411921cc0ba6d94 (diff)
COMPMID-2417: NEDequantizationLayer support for QASYMM8_PER_CHANNEL
Change-Id: I1ef4ce8610e11e81702b0b7f0f7c437fed49833e
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1795
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/AssetsLibrary.h                                   |  1
-rw-r--r--  tests/datasets/DatatypeDataset.h                        | 11
-rw-r--r--  tests/validation/NEON/DequantizationLayer.cpp           |  6
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h  | 57
-rw-r--r--  tests/validation/reference/DequantizationLayer.cpp      |  8
5 files changed, 56 insertions(+), 27 deletions(-)
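
For context, QASYMM8_PER_CHANNEL is an asymmetric 8-bit scheme that carries one (scale, offset) pair per channel rather than a single pair for the whole tensor. A minimal sketch of the per-element mapping this commit exercises (helper name hypothetical, not the library's API):

    #include <cstdint>
    #include <vector>

    // Hypothetical sketch: dequantize one QASYMM8_PER_CHANNEL value from channel c.
    // Each channel carries its own scale and zero-point offset.
    inline float dequantize_per_channel(uint8_t q,
                                        const std::vector<float>   &scale,
                                        const std::vector<int32_t> &offset,
                                        int c)
    {
        return scale[c] * (static_cast<int32_t>(q) - offset[c]);
    }
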
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 2f2665f381..2ac13468de 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -628,6 +628,7 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
{
case DataType::U8:
case DataType::QASYMM8:
+ case DataType::QASYMM8_PER_CHANNEL:
{
std::uniform_int_distribution<uint8_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max());
fill(tensor, distribution_u8, seed_offset);
diff --git a/tests/datasets/DatatypeDataset.h b/tests/datasets/DatatypeDataset.h
index a158a5f52d..9bdb346340 100644
--- a/tests/datasets/DatatypeDataset.h
+++ b/tests/datasets/DatatypeDataset.h
@@ -48,6 +48,17 @@ public:
{
}
};
+class QuantizedPerChannelTypes final : public framework::dataset::ContainerDataset<std::vector<DataType>>
+{
+public:
+ QuantizedPerChannelTypes()
+ : ContainerDataset("QuantizedPerChannelTypes",
+ {
+ DataType::QASYMM8_PER_CHANNEL
+ })
+ {
+ }
+};
} // namespace datasets
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/NEON/DequantizationLayer.cpp b/tests/validation/NEON/DequantizationLayer.cpp
index a4606fe8a0..005ed6900c 100644
--- a/tests/validation/NEON/DequantizationLayer.cpp
+++ b/tests/validation/NEON/DequantizationLayer.cpp
@@ -123,13 +123,15 @@ TEST_SUITE_END() // FP16
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), datasets::QuantizedTypes()),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDequantizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), concat(datasets::QuantizedTypes(),
+ datasets::QuantizedPerChannelTypes())),
framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), datasets::QuantizedTypes()),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDequantizationLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), concat(datasets::QuantizedTypes(),
+ datasets::QuantizedPerChannelTypes())),
framework::dataset::make("DataType", DataType::F32)))
{
// Validate output
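
In the test framework's dataset algebra, concat appends one value set to another and combine forms a cartesian product, so every shape is now also run with the per-channel input type. A rough standalone illustration, assuming QuantizedTypes covers the types handled in the fixture below (shape strings hypothetical):

    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        // concat(QuantizedTypes, QuantizedPerChannelTypes): one flat list of input types.
        std::vector<std::string> types = { "QASYMM8", "QSYMM8", "QSYMM16" };
        types.push_back("QASYMM8_PER_CHANNEL");

        // combine(shapes, types): cartesian product, one test configuration per pair.
        const std::vector<std::string> shapes = { "32x13x2", "11x16x3x2" }; // hypothetical
        for(const auto &s : shapes)
        {
            for(const auto &t : types)
            {
                std::cout << s << " x " << t << " -> F32\n";
            }
        }
        return 0;
    }
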
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index 2c8f05746d..4842ee1c59 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -49,7 +49,7 @@ public:
template <typename...>
void setup(TensorShape shape, DataType src_data_type, DataType dst_datatype)
{
- _quantization_info = generate_quantization_info(src_data_type);
+ _quantization_info = generate_quantization_info(src_data_type, shape.z());
_target = compute_target(shape, src_data_type, dst_datatype);
_reference = compute_reference(shape, src_data_type);
}
@@ -92,32 +92,34 @@ protected:
SimpleTensor<T> compute_reference(const TensorShape &shape, DataType src_data_type)
{
- if(src_data_type == DataType::QASYMM8)
+ switch(src_data_type)
{
- SimpleTensor<uint8_t> src{ shape, src_data_type, 1, _quantization_info };
- fill(src);
- return reference::dequantization_layer<T>(src);
- }
- else if(src_data_type == DataType::QSYMM8)
- {
- SimpleTensor<int8_t> src{ shape, src_data_type, 1, _quantization_info };
- fill(src);
- return reference::dequantization_layer<T>(src);
- }
- else if(src_data_type == DataType::QSYMM16)
- {
- SimpleTensor<int16_t> src{ shape, src_data_type, 1, _quantization_info };
- fill(src);
- return reference::dequantization_layer<T>(src);
- }
- else
- {
- ARM_COMPUTE_ERROR("Unsupported data type");
+ case DataType::QASYMM8:
+ case DataType::QASYMM8_PER_CHANNEL:
+ {
+ SimpleTensor<uint8_t> src{ shape, src_data_type, 1, _quantization_info };
+ fill(src);
+ return reference::dequantization_layer<T>(src);
+ }
+ case DataType::QSYMM8:
+ {
+ SimpleTensor<int8_t> src{ shape, src_data_type, 1, _quantization_info };
+ fill(src);
+ return reference::dequantization_layer<T>(src);
+ }
+ case DataType::QSYMM16:
+ {
+ SimpleTensor<int16_t> src{ shape, src_data_type, 1, _quantization_info };
+ fill(src);
+ return reference::dequantization_layer<T>(src);
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
}
}
protected:
- QuantizationInfo generate_quantization_info(DataType data_type)
+ QuantizationInfo generate_quantization_info(DataType data_type, int32_t num_channels)
{
std::mt19937 gen(library.get()->seed());
std::uniform_int_distribution<> distribution_scale_q8(1, 255);
@@ -130,6 +132,17 @@ protected:
return QuantizationInfo(1.f / distribution_scale_q16(gen));
case DataType::QSYMM8:
return QuantizationInfo(1.f / distribution_scale_q8(gen));
+ case DataType::QASYMM8_PER_CHANNEL:
+ {
+ std::vector<float> scale(num_channels);
+ std::vector<int32_t> offset(num_channels);
+ for(int32_t i = 0; i < num_channels; ++i)
+ {
+ scale[i] = 1.f / distribution_scale_q8(gen);
+ offset[i] = distribution_offset_q8(gen);
+ }
+ return QuantizationInfo(scale, offset);
+ }
case DataType::QASYMM8:
return QuantizationInfo(1.f / distribution_scale_q8(gen), distribution_offset_q8(gen));
default:
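
The fixture passes shape.z() as the channel count because the reference path (next file) treats the z dimension of an NCHW tensor as channels, and per-channel info needs one (scale, offset) pair per channel. A minimal sketch of constructing such a QuantizationInfo with the same vector-based constructor used above (example values arbitrary):

    #include "arm_compute/core/Types.h"
    #include <vector>

    // Hypothetical example: a 3-channel tensor gets three (scale, offset) pairs.
    const std::vector<float>         scale  = { 0.25f, 0.5f, 0.125f };
    const std::vector<int32_t>       offset = { 10, 0, 128 };
    const arm_compute::QuantizationInfo qinfo(scale, offset);
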
diff --git a/tests/validation/reference/DequantizationLayer.cpp b/tests/validation/reference/DequantizationLayer.cpp
index cceee0421c..74686bdaaf 100644
--- a/tests/validation/reference/DequantizationLayer.cpp
+++ b/tests/validation/reference/DequantizationLayer.cpp
@@ -59,20 +59,22 @@ SimpleTensor<TOut> dequantization_layer_nchw(const SimpleTensor<TIn> &src)
SimpleTensor<TOut> dst{ src.shape(), dst_data_type };
- if(src_data_type == DataType::QSYMM8_PER_CHANNEL)
+ if(is_data_type_quantized_per_channel(src_data_type))
{
const int WH = src.shape().x() * src.shape().y();
const int C = src.shape().z();
const int N = src.shape().total_size() / (WH * C);
- const std::vector<float> qscales = src.quantization_info().scale();
+ const std::vector<float> qscales = src.quantization_info().scale();
+ const std::vector<int32_t> qoffsets = src.quantization_info().offset();
+ const bool has_offsets = src_data_type == DataType::QASYMM8_PER_CHANNEL;
for(int n = 0; n < N; ++n)
{
for(int c = 0; c < C; ++c)
{
const size_t idx = n * C * WH + c * WH;
- const UniformQuantizationInfo channel_qinfo = { qscales[c], 0 };
+ const UniformQuantizationInfo channel_qinfo = { qscales[c], has_offsets ? qoffsets[c] : 0 };
// Dequantize slice
for(int s = 0; s < WH; ++s)
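
The hunk is cut off above, but the surrounding index math is self-contained: the tensor is walked as N batches of C channel slices of W*H elements each, and every slice is dequantized with its channel's (scale, offset), falling back to a zero offset for the symmetric per-channel type. A standalone sketch of the same traversal, with hypothetical sizes and values:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main()
    {
        // Hypothetical NCHW tensor: N = 2 batches, C = 3 channels, W*H = 4 per slice.
        const int N = 2, C = 3, WH = 4;
        std::vector<uint8_t> src(N * C * WH, 200);
        std::vector<float>   dst(src.size());

        const std::vector<float>   qscales  = { 0.25f, 0.5f, 0.125f };
        const std::vector<int32_t> qoffsets = { 10, 0, 128 }; // would be all zero for QSYMM8_PER_CHANNEL

        for(int n = 0; n < N; ++n)
        {
            for(int c = 0; c < C; ++c)
            {
                const size_t idx = n * C * WH + c * WH; // start of channel slice (n, c)
                for(int s = 0; s < WH; ++s)
                {
                    // Asymmetric per-channel dequantize: scale * (q - offset)
                    dst[idx + s] = qscales[c] * (static_cast<int32_t>(src[idx + s]) - qoffsets[c]);
                }
            }
        }
        return 0;
    }
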