author      Gian Marco Iodice <gianmarco.iodice@arm.com>    2021-04-26 08:39:28 +0100
committer   Georgios Pinitas <georgios.pinitas@arm.com>     2021-04-27 11:44:17 +0000
commit      40471d12a19088df4af6ad80e5c0437d724dd8fa (patch)
tree        d17c921b0285d447d6055c7bd88e9962bf4e8f1d /tests/validation
parent      3eb5d29de823f7dbe0dc6b3a882a7db5950428a3 (diff)
download    ComputeLibrary-40471d12a19088df4af6ad80e5c0437d724dd8fa.tar.gz
Add optimization for global pooling in pooling_layer.cl
- Simplify the implementation when the pooling size has the same spatial
  dimensions as the input tensor
- Rework the heuristic for F32/F16
- Add tests validating the global pooling path
- Fix compare_dimensions in validation. The validation used to fail because NCHW and
  NHWC report a different number of dimensions for the same shape (e.g. 1,1,2,1 (NCHW) -> 2,1,1,1 (NHWC))
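The relaxed comparison can be illustrated with a small standalone sketch (plain C++; the helper name and the use of std::vector are illustrative only, this is not the library's compare_dimensions()): dimensions above the permuted block (index >= 3) are compared with missing entries treated as 1, so a permuted shape that reports fewer dimensions no longer fails spuriously.

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative helper: compare only the dimensions above the permuted
// NCHW/NHWC block (index >= 3), treating absent entries as 1.
bool upper_dimensions_match(const std::vector<std::size_t> &a, const std::vector<std::size_t> &b)
{
    const std::size_t max_dims = std::max(a.size(), b.size());
    for(std::size_t i = 3; i < max_dims; ++i)
    {
        const std::size_t da = (i < a.size()) ? a[i] : 1; // absent dimension counts as 1
        const std::size_t db = (i < b.size()) ? b[i] : 1;
        if(da != db)
        {
            return false;
        }
    }
    return true;
}

With this behaviour, {1, 1, 2, 1} and {2, 1, 1, 1} compare as compatible; the lower, permuted dimensions remain covered by the other checks compare_dimensions already performs.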
Change-Id: Iba680cb30bf2a5d0952265a4cc9794f368549ca5
Signed-off-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5510
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--   tests/validation/CL/PoolingLayer.cpp | 121
-rw-r--r--   tests/validation/Validation.h        |  18
2 files changed, 105 insertions, 34 deletions
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index f42c187f8f..0153e659ae 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -159,12 +159,12 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::Datase
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
-                       combine(combine(combine(combine(datasets::PoolingTypes(),
-                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
-                       framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })),
-                       framework::dataset::make("ExcludePadding", { false })),
-                       framework::dataset::make("DataType", DataType::F32))),
-                       pool_data_layout_dataset))
+                                                       combine(combine(combine(combine(datasets::PoolingTypes(),
+                                                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+                                                       framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })),
+                                                       framework::dataset::make("ExcludePadding", { false })),
+                                                       framework::dataset::make("DataType", DataType::F32))),
+                                                       pool_data_layout_dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -188,6 +188,44 @@ FIXTURE_DATA_TEST_CASE(RunSmallIndices, CLPoolingLayerIndicesFixture<float>, fra
     validate(CLAccessor(_target_indices), _ref_indices);
 }
 
+TEST_SUITE(GlobalPooling)
+// *INDENT-OFF*
+// clang-format off
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::ALL,
+               combine(combine(combine(combine(combine(combine(
+                       framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+                                                                TensorShape(27U, 13U, 2U, 4U)
+                                                              }),
+                       framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+                       framework::dataset::make("PoolingSize", { Size2D(27, 13) })),
+                       framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+                       framework::dataset::make("ExcludePadding", false)),
+                       framework::dataset::make("DataType", DataType::F32)),
+                       framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+               combine(combine(combine(combine(combine(combine(
+                       framework::dataset::make("InputShape", { TensorShape(79U, 37U, 11U),
+                                                                TensorShape(79U, 37U, 11U, 4U)
+                                                              }),
+                       framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+                       framework::dataset::make("PoolingSize", { Size2D(79, 37) })),
+                       framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+                       framework::dataset::make("ExcludePadding", false)),
+                       framework::dataset::make("DataType", DataType::F32)),
+                       framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+// clang-format on
+// *INDENT-ON*
+TEST_SUITE_END() // GlobalPooling
+
 TEST_SUITE_END() // FP32
 
 TEST_SUITE(FP16)
@@ -216,6 +254,45 @@ FIXTURE_DATA_TEST_CASE(RunSmallIndices, CLPoolingLayerIndicesFixture<half>, fram
     validate(CLAccessor(_target), _reference, tolerance_f32);
     validate(CLAccessor(_target_indices), _ref_indices);
 }
+
+TEST_SUITE(GlobalPooling)
+// *INDENT-OFF*
+// clang-format off
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<half>, framework::DatasetMode::ALL,
+               combine(combine(combine(combine(combine(combine(
+                       framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+                                                                TensorShape(27U, 13U, 2U, 4U)
+                                                              }),
+                       framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+                       framework::dataset::make("PoolingSize", { Size2D(27, 13) })),
+                       framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+                       framework::dataset::make("ExcludePadding", false)),
+                       framework::dataset::make("DataType", DataType::F16)),
+                       framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+               combine(combine(combine(combine(combine(combine(
+                       framework::dataset::make("InputShape", { TensorShape(79U, 37U, 11U),
+                                                                TensorShape(79U, 37U, 11U, 4U)
+                                                              }),
+                       framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
+                       framework::dataset::make("PoolingSize", { Size2D(79, 37) })),
+                       framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
+                       framework::dataset::make("ExcludePadding", false)),
+                       framework::dataset::make("DataType", DataType::F16)),
+                       framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+// clang-format on
+// *INDENT-ON*
+TEST_SUITE_END() // GlobalPooling
+
 TEST_SUITE_END() // FP16
 
 TEST_SUITE_END() // Float
@@ -238,14 +315,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framew
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
 }
 FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
-                       combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
-                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
-                       framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
-                       framework::dataset::make("ExcludePadding", { true })),
-                       framework::dataset::make("DataType", DataType::QASYMM8))),
-                       framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
-                       framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })),
-                       framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+                                                       combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+                                                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+                                                       framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+                                                       framework::dataset::make("ExcludePadding", { true })),
+                                                       framework::dataset::make("DataType", DataType::QASYMM8))),
+                                                       framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+                                                       framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })),
+                                                       framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -264,14 +341,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<int8_t>, framewo
     validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
 }
 FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
-                       combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
-                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
-                       framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
-                       framework::dataset::make("ExcludePadding", { true })),
-                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
-                       framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
-                       framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })),
-                       framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })))
+                                                       combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+                                                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+                                                       framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+                                                       framework::dataset::make("ExcludePadding", { true })),
+                                                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
+                                                       framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+                                                       framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })),
+                                                       framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
diff --git a/tests/validation/Validation.h b/tests/validation/Validation.h
index a75562bac2..f1ce0fecc7 100644
--- a/tests/validation/Validation.h
+++ b/tests/validation/Validation.h
@@ -159,11 +159,13 @@ bool compare_dimensions(const Dimensions<T> &dimensions1, const Dimensions<T> &d
     {
         // In case a 1D/2D shape becomes 3D after permutation, the permuted tensor will have two/one dimension(s) more and the first (two) value(s) will be 1
         // clang-format off
-        if((dimensions1.num_dimensions() != dimensions2.num_dimensions()) &&
-           ((dimensions1.num_dimensions() != (dimensions2.num_dimensions() + 1)) || (dimensions1.x() != 1)) &&
-           ((dimensions1.num_dimensions() != (dimensions2.num_dimensions() + 2)) || (dimensions1.x() != 1) || (dimensions1.y() != 1)))
+        const auto max_dims = std::max(dimensions1.num_dimensions(), dimensions2.num_dimensions());
+        for(unsigned int i = 3; i < max_dims; ++i)
         {
-            return false;
+            if(dimensions1[i] != dimensions2[i])
+            {
+                return false;
+            }
         }
         // clang-format on
 
@@ -171,14 +173,6 @@ bool compare_dimensions(const Dimensions<T> &dimensions1, const Dimensions<T> &d
         {
             return false;
         }
-
-        for(unsigned int i = 3; i < dimensions1.num_dimensions(); ++i)
-        {
-            if(dimensions1[i] != dimensions2[i])
-            {
-                return false;
-            }
-        }
     }
 
     return true;
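For context on the optimized path exercised by the new GlobalPooling suites: when the pooling size equals the input's spatial dimensions, each (batch, channel) pair reduces to a single value over the whole plane. A minimal reference sketch of that behaviour, assuming NHWC layout, float data, average pooling and no padding (plain C++; the function name is illustrative and this is neither the CL kernel nor the library's reference implementation):

#include <cstddef>
#include <vector>

// Global average pooling over an NHWC float tensor: the whole HxW plane is a
// single pooling window, so the output holds one value per (batch, channel).
std::vector<float> global_avg_pool_nhwc(const std::vector<float> &src,
                                        std::size_t batches, std::size_t height,
                                        std::size_t width, std::size_t channels)
{
    std::vector<float> dst(batches * channels, 0.0f);
    for(std::size_t b = 0; b < batches; ++b)
    {
        for(std::size_t y = 0; y < height; ++y)
        {
            for(std::size_t x = 0; x < width; ++x)
            {
                for(std::size_t c = 0; c < channels; ++c)
                {
                    dst[b * channels + c] += src[((b * height + y) * width + x) * channels + c];
                }
            }
        }
    }
    const float scale = 1.0f / static_cast<float>(height * width); // one window covering the whole plane
    for(float &v : dst)
    {
        v *= scale;
    }
    return dst;
}

The test cases above choose pooling sizes that match the input shapes exactly (Size2D(27, 13) for the 27x13 inputs, Size2D(79, 37) for the 79x37 inputs), so the simplified global-pooling path in pooling_layer.cl is the one being validated.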