author    Chunosov <N.Chunosov@yandex.ru>    2017-11-22 20:42:13 +0700
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:42:17 +0000
commit    5124be5d1caa70964d452cf9a8cc7c67df31fa9d (patch)
tree      77d74963e9c3f52050cbc264a692133395182e98 /tests/validation/CL/ConvolutionLayer.cpp
parent    9873ea3f1ea238ba7abfb635807614517c52be4b (diff)
download  ComputeLibrary-5124be5d1caa70964d452cf9a8cc7c67df31fa9d.tar.gz
COMPMID-661: Convolution quantized (#32)
Change-Id: Id69df4ce98d1d89bdf9c9aa5c4d909659909b30f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110456
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/CL/ConvolutionLayer.cpp')
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp  55
1 file changed, 45 insertions(+), 10 deletions(-)
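
For orientation, here is a minimal, hypothetical sketch of the QASYMM8 convolution path that the tests below exercise, written against the public arm_compute CL runtime API rather than the test framework's create_tensor<> helper. The shapes, strides and tensor contents are illustrative assumptions; only the QuantizationInfo(2.f / 255.f, 127) parameters and the S32 bias type are taken from the patch.

#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"

using namespace arm_compute;

int main()
{
    // Initialise the CL scheduler (context, queue, tuner defaults).
    CLScheduler::get().default_init();

    // Illustrative shapes: 16x16 input with 3 channels, 3x3 kernel, 8 output feature maps.
    CLTensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U), 1, DataType::QASYMM8, QuantizationInfo(2.f / 255.f, 127)));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 8U), 1, DataType::QASYMM8, QuantizationInfo(2.f / 255.f, 127)));
    bias.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::S32)); // bias is S32 when the data type is QASYMM8, as in the patch
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::QASYMM8, QuantizationInfo(2.f / 255.f, 127)));

    // Configure the function: stride 1 and padding 1 keep the 16x16 spatial size.
    CLConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1));

    // Allocate backing CL buffers.
    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // (Fill src/weights/bias with real data via map()/unmap() before running.)
    conv.run();
    CLScheduler::get().sync();

    return 0;
}
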
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index a6e07248aa..56e10f01b3 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -45,7 +45,8 @@ namespace
{
RelativeTolerance<float> tolerance_f32(0.05f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
-constexpr AbsoluteTolerance<float> tolerance_q(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_fixed(1.0f); /**< Tolerance value for comparing reference's output against implementation's output for fixed point data types */
+constexpr AbsoluteTolerance<float> tolerance_qasymm8(0.0); /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
constexpr float tolerance_num = 0.07f; /**< Tolerance number */
/** CNN data types */
@@ -55,6 +56,7 @@ const auto CNNDataTypes = framework::dataset::make("DataType",
DataType::F32,
DataType::QS8,
DataType::QS16,
+ DataType::QASYMM8,
});
} // namespace
@@ -67,17 +69,22 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
// Set fixed point position data type allowed
int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
+ auto bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
+
// Create tensors
- CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position);
- CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position);
- CLTensor bias = create_tensor<CLTensor>(bias_shape, data_type, 1, fixed_point_position);
- CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position);
+ CLTensor src = create_tensor<CLTensor>(input_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor weights = create_tensor<CLTensor>(weights_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor bias = create_tensor<CLTensor>(bias_shape, bias_data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
+ CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1, fixed_point_position, QuantizationInfo(2.f / 255.f, 127));
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ const QuantizationInfo src_quantization_info = src.info()->quantization_info();
+ const QuantizationInfo weights_quantization_info = weights.info()->quantization_info();
+
// Create and configure function
CLConvolutionLayer conv;
conv.configure(&src, &weights, &bias, &dst, info);
@@ -93,6 +100,10 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
validate(bias.info()->valid_region(), bias_valid_region);
validate(dst.info()->valid_region(), dst_valid_region);
+ // Validate QuantizationInfo
+ ARM_COMPUTE_EXPECT(src.info()->quantization_info() == src_quantization_info, framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(weights.info()->quantization_info() == weights_quantization_info, framework::LogLevel::ERRORS);
+
// Validate padding
//TODO(COMPMID-415) Need to validate padding?
}
@@ -143,7 +154,7 @@ TEST_SUITE_END()
template <typename T>
using CLConvolutionLayerFixedPointFixture = ConvolutionValidationFixedPointFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
-TEST_SUITE(Quantized)
+TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
@@ -153,7 +164,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int8_t>, fr
framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
@@ -162,7 +173,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int8_t>, fr
framework::dataset::make("FractionalBits", 4, 7)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
TEST_SUITE_END()
@@ -175,7 +186,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerFixedPointFixture<int16_t>, f
framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
@@ -184,7 +195,31 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerFixedPointFixture<int16_t>, f
framework::dataset::make("FractionalBits", 1, 14)))
{
// Validate output
- validate(CLAccessor(_target), _reference, tolerance_q);
+ validate(CLAccessor(_target), _reference, tolerance_fixed);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+template <typename T>
+using CLConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLConvolutionLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 0) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END()
TEST_SUITE_END()
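
Once the validation binary is built, the new suite can presumably be selected with the test framework's --filter regex option; the binary path below assumes the default SCons build directory and is illustrative only:

LD_LIBRARY_PATH=build ./build/tests/arm_compute_validation --filter='CL/ConvolutionLayer/Quantized/QASYMM8/.*'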