author     Isabella Gottardi <isabella.gottardi@arm.com>  2018-02-12 14:59:19 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:49:16 +0000
commit     3f217ec4ff11e20fe686beb9a28d0bbd80a56cd6 (patch)
tree       81db8baab925af5b416b66d0328be2eb49543824 /tests/validation/NEON/DilatedConvolutionLayer.cpp
parent     d9eb27597eabe5b7c17520f4f9b3f8a282d72573 (diff)
download   ComputeLibrary-3f217ec4ff11e20fe686beb9a28d0bbd80a56cd6.tar.gz
COMPMID-908 - Merge Activation layer with Convolution Layer (NEON, CL, GLES)
Change-Id: Iab06d0768ecf805b841e601185608aae88cf9166
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120874
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/NEON/DilatedConvolutionLayer.cpp')
-rw-r--r--  tests/validation/NEON/DilatedConvolutionLayer.cpp  |  80
1 file changed, 48 insertions(+), 32 deletions(-)
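
The patch threads an ActivationLayerInfo through every dilated-convolution test fixture by adding one more combine() level to each dataset, so the fused activation path is exercised alongside the existing parameters. A minimal sketch of how the fused interface is then driven at the function level (tensor names are hypothetical; the trailing ActivationLayerInfo argument on configure() is assumed from the pattern this patch introduces for NEON, CL and GLES):

    // Sketch only: NEON GEMM-based convolution with a fused activation.
    // Tensor setup and allocation are omitted; src, weights, bias and dst are
    // assumed to be already-configured ITensor objects.
    NEGEMMConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst,
                   PadStrideInfo(1, 1, 0, 0),   // stride 1, no padding
                   WeightsInfo(),               // weights not pre-reshaped
                   Size2D(2U, 2U),              // dilation, as in these tests
                   ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
    conv.run();
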
diff --git a/tests/validation/NEON/DilatedConvolutionLayer.cpp b/tests/validation/NEON/DilatedConvolutionLayer.cpp
index c0a72ec8cb..1e8c19fc5e 100644
--- a/tests/validation/NEON/DilatedConvolutionLayer.cpp
+++ b/tests/validation/NEON/DilatedConvolutionLayer.cpp
@@ -163,16 +163,18 @@ using NEGEMMDilatedConvolutionLayerFixture = ConvolutionValidationFixture<Tensor
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F16)))
+ framework::dataset::make("DataType", DataType::F16)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -181,16 +183,18 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::F32)))
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -204,18 +208,22 @@ using NEGEMMDilatedConvolutionLayerFixedPointFixture = ConvolutionValidationFixe
TEST_SUITE(FixedPoint)
TEST_SUITE(QS8)
// We test for fixed point precision [4,6]
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS8)),
- framework::dataset::make("FractionalBits", 4, 7)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS8)),
+ framework::dataset::make("FractionalBits", 4, 7)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
@@ -224,18 +232,22 @@ TEST_SUITE_END()
TEST_SUITE(QS16)
// Testing for fixed point position [1,14)
-FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunTiny, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::TinyDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
}
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true, false })),
- framework::dataset::make("DataType", DataType::QS16)),
- framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true, false })),
+ framework::dataset::make("DataType", DataType::QS16)),
+ framework::dataset::make("FractionalBits", 1, 14)),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_q);
@@ -248,18 +260,22 @@ using NEGEMMDilatedConvolutionLayerQuantizedFixture = ConvolutionValidationQuant
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::SmallDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
- framework::dataset::make("ReshapeWeights", { true })),
- framework::dataset::make("DataType", DataType::QASYMM8)),
- framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMDilatedConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(combine(datasets::LargeDilatedConvolutionLayerDataset(),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ framework::dataset::make("ActivationLayerInfo", ActivationLayerInfo())))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
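
Note that every new dataset entry passes a default-constructed ActivationLayerInfo(), i.e. the fused activation stays disabled, so the reference outputs and tolerances are unchanged. A hypothetical follow-up (not part of this patch) could sweep real activations instead, using the same dataset helper the fixtures already rely on:

    // Hypothetical dataset extension: exercise a couple of fused activations
    // in addition to the disabled default.
    const auto act_infos = framework::dataset::make("ActivationLayerInfo",
    {
        ActivationLayerInfo(),                                                           // fused activation disabled
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),              // plain ReLU
        ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)  // ReLU clamped at 6
    });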