From 8cf8c1123440c2002ee108d1949529bf21eac944 Mon Sep 17 00:00:00 2001
From: Usama Arif
Date: Thu, 14 Mar 2019 15:36:54 +0000
Subject: COMPMID-1944 Add support for "reflect" padding mode in NEPad

Change-Id: I56c42524497d37d44708648571fa211ac1afbd98
Signed-off-by: Usama Arif
Reviewed-on: https://review.mlplatform.org/c/885
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
Reviewed-by: Pablo Marquez
---
 tests/validation/CL/PadLayer.cpp            |  20 ++--
 tests/validation/NEON/PadLayer.cpp          | 148 +++++++++++++++-------------
 tests/validation/fixtures/PadLayerFixture.h |  48 ++++++---
 3 files changed, 128 insertions(+), 88 deletions(-)

diff --git a/tests/validation/CL/PadLayer.cpp b/tests/validation/CL/PadLayer.cpp
index 4bbd7b8e14..9430b1212b 100644
--- a/tests/validation/CL/PadLayer.cpp
+++ b/tests/validation/CL/PadLayer.cpp
@@ -94,8 +94,9 @@ TEST_SUITE(Float)
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<float>, framework::DatasetMode::ALL,
                        combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
-                       PaddingSizesDataset))
+                       combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+                               PaddingSizesDataset),
+                       framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
@@ -105,8 +106,9 @@ TEST_SUITE_END() // FP32
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<half>, framework::DatasetMode::ALL,
                        combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
-                       PaddingSizesDataset))
+                       combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+                               PaddingSizesDataset),
+                       framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
@@ -118,8 +120,9 @@ TEST_SUITE(Integer)
 TEST_SUITE(S8)
 FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<int8_t>, framework::DatasetMode::ALL,
                        combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
-                       PaddingSizesDataset))
+                       combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
+                               PaddingSizesDataset),
+                       framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
@@ -131,8 +134,9 @@ TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
 FIXTURE_DATA_TEST_CASE(RunPadding, CLPaddingFixture<uint8_t>, framework::DatasetMode::ALL,
                        combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
-                       PaddingSizesDataset))
+                       combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
+                               PaddingSizesDataset),
+                       framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
diff --git a/tests/validation/NEON/PadLayer.cpp b/tests/validation/NEON/PadLayer.cpp
index 90d3ae98d8..5049347f27 100644
--- a/tests/validation/NEON/PadLayer.cpp
+++ b/tests/validation/NEON/PadLayer.cpp
@@ -42,12 +42,14 @@ namespace validation
 {
 namespace
 {
-const auto PaddingSizesDataset = framework::dataset::make("PaddingSize", { PaddingList{ { 0, 0 } },
-                                                                           PaddingList{ { 1, 1 } },
-                                                                           PaddingList{ { 1, 1 }, { 2, 2 } },
-                                                                           PaddingList{ { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } },
-                                                                           PaddingList{ { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 2 } },
-                                                                           PaddingList{ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 1, 1 } }
-                                                                         });
+const auto PaddingSizesDataset = framework::dataset::make("PaddingSize",
+{
+    PaddingList{ { 0, 0 } },
+    PaddingList{ { 1, 1 } },
+    PaddingList{ { 1, 1 }, { 2, 2 } },
+    PaddingList{ { 1, 1 }, { 1, 1 }, { 1, 1 } },
+    PaddingList{ { 0, 0 }, { 1, 0 }, { 0, 1 } },
+    PaddingList{ { 0, 1 }, { 1, 0 }, { 0, 1 } },
+});
 } // namespace

@@ -57,33 +59,62 @@ TEST_SUITE(NEON)
 TEST_SUITE(PadLayer)

 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32)
-                                                     }),
-               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
-                                                       TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32)
-                                                     })),
-               framework::dataset::make("PaddingSize", { PaddingList{{0, 0}},
-                                                         PaddingList{{1, 1}},
-                                                         PaddingList{{1, 1}, {2, 2}},
-                                                         PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
-                                                         PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
-                                                         PaddingList{{0,0}, {0,0}, {0,0}, {1,1}}
-                                                       })),
-               framework::dataset::make("Expected", { false, false, true, true, true, true })),
-               input_info, output_info, padding, expected)
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data type input/output
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32)
+                                                     }),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(28U, 11U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(29U, 17U, 2U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(29U, 15U, 4U, 3U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(27U, 14U, 3U, 4U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U, 3U), 1, DataType::F32)
+                                                     })),
+               framework::dataset::make("PaddingSize", { PaddingList{{0, 0}},
+                                                         PaddingList{{1, 1}},
+                                                         PaddingList{{1, 1}, {2, 2}},
+                                                         PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
+                                                         PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
+                                                         PaddingList{{0,0}, {0,0}, {0,0}, {1,1}},
+                                                         PaddingList{{0, 0}},
+                                                         PaddingList{{1, 1}},
+                                                         PaddingList{{1, 1}, {2, 2}},
+                                                         PaddingList{{1,1}, {1,1}, {1,1}, {1,1}},
+                                                         PaddingList{{0,0}, {1,0}, {0,1}, {1,2}},
+                                                         PaddingList{{0,0}, {0,0}, {0,0}, {1,1}}
+                                                       })),
+               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT,
+                                                         PaddingMode::CONSTANT,
+                                                         PaddingMode::CONSTANT,
+                                                         PaddingMode::CONSTANT,
+                                                         PaddingMode::CONSTANT,
+                                                         PaddingMode::CONSTANT,
+                                                         PaddingMode::REFLECT,
+                                                         PaddingMode::REFLECT,
+                                                         PaddingMode::REFLECT,
+                                                         PaddingMode::REFLECT,
+                                                         PaddingMode::REFLECT,
+                                                         PaddingMode::SYMMETRIC })),
+               framework::dataset::make("Expected", { false, false, true, true, true, true, false, false, true, false, false, true })),
+               input_info, output_info, padding, mode, expected)
 {
-    Status s = NEPadLayer::validate(&input_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), padding);
-    ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(NEPadLayer::validate(&input_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), padding, PixelValue(), mode)) == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on

@@ -96,17 +127,17 @@ TEST_SUITE(Float)
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<float>, framework::DatasetMode::ALL,
-                       combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F32 })),
-                       PaddingSizesDataset))
+                       combine(combine(combine(datasets::Small3DShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+                                       PaddingSizesDataset),
+                               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                       combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::F32 })),
-                       PaddingSizesDataset))
+                       combine(combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", { DataType::F32 })),
+                                       PaddingSizesDataset),
+                               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT, PaddingMode::SYMMETRIC })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -116,17 +147,17 @@ TEST_SUITE_END() // FP32
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<half>, framework::DatasetMode::ALL,
-                       combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16 })),
-                       PaddingSizesDataset))
+                       combine(combine(combine(datasets::Small3DShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+                                       PaddingSizesDataset),
+                               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                       combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::F16 })),
-                       PaddingSizesDataset))
+                       combine(combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", { DataType::F16 })),
+                                       PaddingSizesDataset),
+                               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT, PaddingMode::SYMMETRIC })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -135,41 +166,20 @@ TEST_SUITE_END() // FP16
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 TEST_SUITE_END() // Float

-TEST_SUITE(Integer)
-TEST_SUITE(S8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<int8_t>, framework::DatasetMode::ALL,
-                       combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::S8 })),
-                       PaddingSizesDataset))
-{
-    // Validate output
-    validate(Accessor(_target), _reference);
-}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<int8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                       combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::S8 })),
-                       PaddingSizesDataset))
-{
-    // Validate output
-    validate(Accessor(_target), _reference);
-}
-TEST_SUITE_END() // S8
-TEST_SUITE_END() // Integer
-
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEPaddingFixture<uint8_t>, framework::DatasetMode::ALL,
-                       combine(
-                       combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
-                       PaddingSizesDataset))
+                       combine(combine(combine(datasets::Small3DShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
+                                       PaddingSizesDataset),
+                               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEPaddingFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
-                       combine(
-                       combine(datasets::LargeShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
-                       PaddingSizesDataset))
+                       combine(combine(combine(datasets::Large3DShapes(), framework::dataset::make("DataType", { DataType::QASYMM8 })),
+                                       PaddingSizesDataset),
+                               framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT, PaddingMode::SYMMETRIC })))
 {
     // Validate output
     validate(Accessor(_target), _reference);
diff --git a/tests/validation/fixtures/PadLayerFixture.h b/tests/validation/fixtures/PadLayerFixture.h
index 839313a118..3538cabfeb 100644
--- a/tests/validation/fixtures/PadLayerFixture.h
+++ b/tests/validation/fixtures/PadLayerFixture.h
@@ -45,30 +45,54 @@ class PaddingFixture : public framework::Fixture
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type, const PaddingList &padding)
+    void setup(TensorShape shape, DataType data_type, const PaddingList &padding, const PaddingMode mode)
     {
-        _target    = compute_target(shape, data_type, padding);
-        _reference = compute_reference(shape, data_type, padding);
+        PaddingList clamped_padding = padding;
+        if(mode != PaddingMode::CONSTANT)
+        {
+            // Clamp padding to prevent applying more than is possible.
+            for(uint32_t i = 0; i < padding.size(); ++i)
+            {
+                if(mode == PaddingMode::REFLECT)
+                {
+                    clamped_padding[i].first  = std::min(static_cast<uint32_t>(padding[i].first), static_cast<uint32_t>(shape[i] - 1));
+                    clamped_padding[i].second = std::min(static_cast<uint32_t>(padding[i].second), static_cast<uint32_t>(shape[i] - 1));
+                }
+                else
+                {
+                    clamped_padding[i].first  = std::min(static_cast<uint32_t>(padding[i].first), static_cast<uint32_t>(shape[i]));
+                    clamped_padding[i].second = std::min(static_cast<uint32_t>(padding[i].second), static_cast<uint32_t>(shape[i]));
+                }
+            }
+        }
+        _target    = compute_target(shape, data_type, clamped_padding, mode);
+        _reference = compute_reference(shape, data_type, clamped_padding, mode);
     }

 protected:
     template <typename U>
-    void fill(U &&tensor)
+    void fill(U &&tensor, int i)
     {
-        library->fill_tensor_uniform(tensor, 0);
+        library->fill_tensor_uniform(tensor, i);
     }

     TensorType compute_target(const TensorShape &shape, DataType data_type,
-                              const PaddingList &paddings)
+                              const PaddingList &paddings,
+                              const PaddingMode mode)
     {
         // Create tensors
         TensorType src = create_tensor<TensorType>(shape, data_type);
         TensorType dst;

+        TensorType const_val = create_tensor<TensorType>(TensorShape(1), data_type);
+        const_val.allocator()->allocate();
+        fill(AccessorType(const_val), 1);
+        T const_value = *static_cast<T *>(AccessorType(const_val)(Coordinates(0)));
+
         // Create and configure function
         FunctionType padding;
-        padding.configure(&src, &dst, paddings);
+        padding.configure(&src, &dst, paddings, const_value, mode);

         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -81,7 +105,7 @@ protected:
         ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

         // Fill tensors
-        fill(AccessorType(src));
+        fill(AccessorType(src), 0);

         // Compute function
         padding.run();
@@ -90,15 +114,17 @@ protected:
     }

     SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
-                                      const PaddingList &paddings)
+                                      const PaddingList &paddings, const PaddingMode mode)
     {
         // Create reference tensor
         SimpleTensor<T> src{ shape, data_type };
+        SimpleTensor<T> const_val{ TensorShape(1), data_type };

         // Fill reference tensor
-        fill(src, 0);
-        fill(src);
+        fill(src, 0);
+        fill(const_val, 1);

-        return reference::pad_layer(src, paddings);
+        return reference::pad_layer(src, paddings, const_val[0], mode);
     }

     TensorType _target{};
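
Note on the padding modes exercised above: the three modes differ only in how out-of-range elements are sourced. CONSTANT fills with a supplied value; REFLECT mirrors about the border element without repeating it, so each dimension can absorb at most dim - 1 elements of padding per side; SYMMETRIC mirrors including the border element, allowing up to dim. That is exactly the clamping rule PadLayerFixture.h applies in setup(). The sketch below is a minimal standalone C++ illustration of those semantics, independent of the Arm Compute Library API; pad_1d and its types are hypothetical names introduced here, not part of the library.

// pad_modes_demo.cpp - standalone sketch of CONSTANT/REFLECT/SYMMETRIC
// padding semantics (hypothetical helper, not the ACL implementation).
#include <cstddef>
#include <iostream>
#include <vector>

enum class PaddingMode { CONSTANT, REFLECT, SYMMETRIC };

// Pads a 1-D sequence. REFLECT mirrors around the border element and never
// repeats it, so it requires before/after <= in.size() - 1; SYMMETRIC repeats
// the border element, so it allows before/after <= in.size().
std::vector<int> pad_1d(const std::vector<int> &in, std::size_t before, std::size_t after,
                        PaddingMode mode, int const_value = 0)
{
    const long n = static_cast<long>(in.size());
    std::vector<int> out;
    out.reserve(in.size() + before + after);
    for(long i = -static_cast<long>(before); i < n + static_cast<long>(after); ++i)
    {
        if(i >= 0 && i < n)
        {
            out.push_back(in[i]); // inside the source tensor
        }
        else if(mode == PaddingMode::CONSTANT)
        {
            out.push_back(const_value); // fill with the supplied constant
        }
        else
        {
            // Mirror the out-of-range index back into [0, n).
            const long j = (mode == PaddingMode::REFLECT)
                           ? ((i < 0) ? -i : 2 * n - 2 - i)      // border excluded
                           : ((i < 0) ? -i - 1 : 2 * n - 1 - i); // border included
            out.push_back(in[j]);
        }
    }
    return out;
}

int main()
{
    const std::vector<int> x{ 1, 2, 3 };
    for(int v : pad_1d(x, 2, 2, PaddingMode::REFLECT))   { std::cout << v << ' '; } // 3 2 1 2 3 2 1
    std::cout << '\n';
    for(int v : pad_1d(x, 2, 2, PaddingMode::SYMMETRIC)) { std::cout << v << ' '; } // 2 1 1 2 3 3 2
    std::cout << '\n';
    for(int v : pad_1d(x, 2, 2, PaddingMode::CONSTANT))  { std::cout << v << ' '; } // 0 0 1 2 3 0 0
    std::cout << '\n';
}

With a length-3 input, REFLECT can absorb at most two elements of padding per side and SYMMETRIC at most three, which matches the shape[i] - 1 versus shape[i] clamp in the fixture; likewise, the Validate test above expects some padding configurations that pass in CONSTANT mode to be rejected in REFLECT mode.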