commit    8217c8e4f488eb32733c481ab3a4d905069479f1
author    Georgios Pinitas <georgios.pinitas@arm.com>  2019-11-11 18:24:22 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>  2019-11-12 21:22:47 +0000
tree      f54e10c459d5bc5b847ef0caba075a636d9f1df5
parent    94e0cf960ea6116eb57fa88d9b951f859b52c602
COMPMID-2895: Remove QASYMM8_PER_CHANNEL data type
Change-Id: I2d1b77370f8eceeaeae95306b4db5d90ababb76f
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2266
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/AssetsLibrary.h                                    |  2
-rw-r--r--  tests/Utils.h                                            |  1
-rw-r--r--  tests/datasets/DatatypeDataset.h                         |  2
-rw-r--r--  tests/validation/Helpers.cpp                             |  9
-rw-r--r--  tests/validation/Helpers.h                               |  9
-rw-r--r--  tests/validation/fixtures/DequantizationLayerFixture.h   | 12
-rw-r--r--  tests/validation/reference/DequantizationLayer.cpp       |  6
7 files changed, 9 insertions(+), 32 deletions(-)
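Note: the switch from QASYMM8_PER_CHANNEL to QSYMM8_PER_CHANNEL is why the per-channel offset vector disappears from the fixture and the reference dequantization below always uses an offset of 0: symmetric quantization maps real 0.0 to quantized 0, so each channel carries only a scale. A minimal standalone sketch of that behaviour (not the library API; the dequantize_qsymm8 helper and the sample scales/values are hypothetical):

#include <cstdint>
#include <iostream>
#include <vector>

// Symmetric per-channel dequantization: multiply by the channel scale,
// no zero-point offset is needed.
static float dequantize_qsymm8(int8_t q, float scale)
{
    return static_cast<float>(q) * scale;
}

int main()
{
    // One scale per channel, no offsets (hypothetical sample data).
    const std::vector<float>               scales   = { 0.5f, 0.25f };
    const std::vector<std::vector<int8_t>> channels = { { -2, 0, 3 }, { 4, -8, 1 } };

    for(size_t c = 0; c < channels.size(); ++c)
    {
        for(int8_t q : channels[c])
        {
            std::cout << dequantize_qsymm8(q, scales[c]) << " ";
        }
        std::cout << "\n";
    }
    return 0;
}
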
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index 280f6ddbd0..f8635ea576 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -632,7 +632,6 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
{
case DataType::U8:
case DataType::QASYMM8:
- case DataType::QASYMM8_PER_CHANNEL:
{
std::uniform_int_distribution<uint8_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max());
fill(tensor, distribution_u8, seed_offset);
@@ -640,6 +639,7 @@ void AssetsLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_t
}
case DataType::S8:
case DataType::QSYMM8:
+ case DataType::QSYMM8_PER_CHANNEL:
case DataType::QASYMM8_SIGNED:
{
std::uniform_int_distribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
diff --git a/tests/Utils.h b/tests/Utils.h
index 6b3935e526..aff63d3119 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -355,7 +355,6 @@ void store_value_with_data_type(void *ptr, T value, DataType data_type)
{
case DataType::U8:
case DataType::QASYMM8:
- case DataType::QASYMM8_PER_CHANNEL:
*reinterpret_cast<uint8_t *>(ptr) = value;
break;
case DataType::S8:
diff --git a/tests/datasets/DatatypeDataset.h b/tests/datasets/DatatypeDataset.h
index 9bdb346340..df0ddb3ce5 100644
--- a/tests/datasets/DatatypeDataset.h
+++ b/tests/datasets/DatatypeDataset.h
@@ -54,7 +54,7 @@ public:
QuantizedPerChannelTypes()
: ContainerDataset("QuantizedPerChannelTypes",
{
- DataType::QASYMM8_PER_CHANNEL
+ DataType::QSYMM8_PER_CHANNEL
})
{
}
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 95a5548628..e362e05b81 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -335,15 +335,6 @@ std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo
return std::pair<int, int> { min_bound, max_bound };
}
-std::pair<int, int> get_asymm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id)
-{
- ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
-
- const int min_bound = quantize_qasymm8_per_channel(min, quant_info, channel_id);
- const int max_bound = quantize_qasymm8_per_channel(max, quant_info, channel_id);
- return std::pair<int, int> { min_bound, max_bound };
-}
-
template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<half> &in, SimpleTensor<half> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<int> &in, SimpleTensor<int> &roi, const Coordinates &coord);
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 2c1df39f14..a0169752de 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -285,15 +285,6 @@ std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, flo
* @param[in] channel_id Channel id for per channel quantization info.
*/
std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
-
-/** Helper function to compute asymmetric quantized min and max bounds
- *
- * @param[in] quant_info Quantization info to be used for conversion
- * @param[in] min Floating point minimum value to be quantized
- * @param[in] max Floating point maximum value to be quantized
- * @param[in] channel_id Channel id for per channel quantization info.
- */
-std::pair<int, int> get_asymm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/fixtures/DequantizationLayerFixture.h b/tests/validation/fixtures/DequantizationLayerFixture.h
index c7a818fcc7..f44f8658c2 100644
--- a/tests/validation/fixtures/DequantizationLayerFixture.h
+++ b/tests/validation/fixtures/DequantizationLayerFixture.h
@@ -101,12 +101,12 @@ protected:
switch(src_data_type)
{
case DataType::QASYMM8:
- case DataType::QASYMM8_PER_CHANNEL:
{
SimpleTensor<uint8_t> src{ shape, src_data_type, 1, _quantization_info };
fill(src);
return reference::dequantization_layer<T>(src);
}
+ case DataType::QSYMM8_PER_CHANNEL:
case DataType::QSYMM8:
{
SimpleTensor<int8_t> src{ shape, src_data_type, 1, _quantization_info };
@@ -138,16 +138,14 @@ protected:
return QuantizationInfo(1.f / distribution_scale_q16(gen));
case DataType::QSYMM8:
return QuantizationInfo(1.f / distribution_scale_q8(gen));
- case DataType::QASYMM8_PER_CHANNEL:
+ case DataType::QSYMM8_PER_CHANNEL:
{
- std::vector<float> scale(num_channels);
- std::vector<int32_t> offset(num_channels);
+ std::vector<float> scale(num_channels);
for(int32_t i = 0; i < num_channels; ++i)
{
- scale[i] = 1.f / distribution_scale_q8(gen);
- offset[i] = distribution_offset_q8(gen);
+ scale[i] = 1.f / distribution_offset_q8(gen);
}
- return QuantizationInfo(scale, offset);
+ return QuantizationInfo(scale);
}
case DataType::QASYMM8:
return QuantizationInfo(1.f / distribution_scale_q8(gen), distribution_offset_q8(gen));
diff --git a/tests/validation/reference/DequantizationLayer.cpp b/tests/validation/reference/DequantizationLayer.cpp
index 69a49a3d6d..16f25c4427 100644
--- a/tests/validation/reference/DequantizationLayer.cpp
+++ b/tests/validation/reference/DequantizationLayer.cpp
@@ -65,16 +65,14 @@ SimpleTensor<TOut> dequantization_layer(const SimpleTensor<TIn> &src)
const int C = src.shape().z();
const int N = src.shape().total_size() / (WH * C);
- const std::vector<float> qscales = src.quantization_info().scale();
- const std::vector<int32_t> qoffsets = src.quantization_info().offset();
- const bool has_offsets = src_data_type == DataType::QASYMM8_PER_CHANNEL;
+ const std::vector<float> qscales = src.quantization_info().scale();
for(int n = 0; n < N; ++n)
{
for(int c = 0; c < C; ++c)
{
const size_t idx = n * C * WH + c * WH;
- const UniformQuantizationInfo channel_qinfo = { qscales[c], has_offsets ? qoffsets[c] : 0 };
+ const UniformQuantizationInfo channel_qinfo = { qscales[c], 0 };
// Dequantize slice
for(int s = 0; s < WH; ++s)