From 9032ee32da54804806a3f26cbbf5a62b3c764f72 Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Wed, 7 Aug 2019 17:04:11 +0100
Subject: MLCE-129: NEPad 30x slower than TensorFlow's implementation

Change-Id: I44770e6a3134c70c4bd58f890d06cb43c9bd8bff
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/1853
Reviewed-by: Giorgio Arena
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 tests/benchmark/NEON/PadLayer.cpp          |  87 +++++++++++++++++++++++
 tests/benchmark/fixtures/PadLayerFixture.h | 109 +++++++++++++++++++++++++++++
 2 files changed, 196 insertions(+)
 create mode 100644 tests/benchmark/NEON/PadLayer.cpp
 create mode 100644 tests/benchmark/fixtures/PadLayerFixture.h

diff --git a/tests/benchmark/NEON/PadLayer.cpp b/tests/benchmark/NEON/PadLayer.cpp
new file mode 100644
index 0000000000..c55c93b8a6
--- /dev/null
+++ b/tests/benchmark/NEON/PadLayer.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEPadLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/benchmark/fixtures/PadLayerFixture.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/SplitDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+namespace
+{
+const auto Fssd_25_8bit_ShapesDataset = framework::dataset::make("TensorShape",
+{
+    TensorShape{ 320U, 320U, 3U },
+    TensorShape{ 160U, 160U, 16U },
+    TensorShape{ 80U, 80U, 32U },
+    TensorShape{ 40U, 40U, 64U },
+    TensorShape{ 20U, 20U, 128U },
+    TensorShape{ 10U, 10U, 256U },
+    TensorShape{ 10U, 10U, 64U },
+    TensorShape{ 5U, 5U, 32U },
+    TensorShape{ 3U, 3U, 32U },
+    TensorShape{ 2U, 2U, 32U }
+});
+
+const auto PaddingSizesDataset = framework::dataset::make("PaddingSize",
+{
+    PaddingList{ { 1, 1 }, { 1, 1 } },
+});
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(PadLayer)
+
+template <typename T>
+using NEPaddingFixture = PaddingFixture<Tensor, NEPadLayer, Accessor>;
+
+REGISTER_FIXTURE_DATA_TEST_CASE(RunF32, NEPaddingFixture<float>, framework::DatasetMode::ALL,
+                                combine(combine(combine(
+                                            Fssd_25_8bit_ShapesDataset,
+                                            framework::dataset::make("DataType", { DataType::F32 })),
+                                        PaddingSizesDataset),
+                                        framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })));
+
+REGISTER_FIXTURE_DATA_TEST_CASE(RunQASYMM8, NEPaddingFixture<uint8_t>, framework::DatasetMode::ALL,
+                                combine(combine(combine(
+                                            Fssd_25_8bit_ShapesDataset,
+                                            framework::dataset::make("DataType", { DataType::QASYMM8 })),
+                                        PaddingSizesDataset),
+                                        framework::dataset::make("PaddingMode", { PaddingMode::CONSTANT, PaddingMode::REFLECT })));
+
+TEST_SUITE_END() // PadLayer
+TEST_SUITE_END() // NEON
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/benchmark/fixtures/PadLayerFixture.h b/tests/benchmark/fixtures/PadLayerFixture.h
new file mode 100644
index 0000000000..2f482a0abf
--- /dev/null
+++ b/tests/benchmark/fixtures/PadLayerFixture.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_PADLAYERFIXTURE
+#define ARM_COMPUTE_TEST_PADLAYERFIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/Globals.h"
+#include "tests/Utils.h"
+#include "tests/framework/Fixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace benchmark
+{
+/** Fixture that can be used for NEON and CL */
+template <typename TensorType, typename Function, typename Accessor>
+class PaddingFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, const PaddingList &paddings, const PaddingMode mode)
+    {
+        PaddingList clamped_padding = paddings;
+        if(mode != PaddingMode::CONSTANT)
+        {
+            // Clamp padding to prevent applying more than is possible.
+            for(uint32_t i = 0; i < paddings.size(); ++i)
+            {
+                if(mode == PaddingMode::REFLECT)
+                {
+                    clamped_padding[i].first  = std::min(static_cast<uint32_t>(paddings[i].first), static_cast<uint32_t>(shape[i] - 1));
+                    clamped_padding[i].second = std::min(static_cast<uint32_t>(paddings[i].second), static_cast<uint32_t>(shape[i] - 1));
+                }
+                else
+                {
+                    clamped_padding[i].first  = std::min(static_cast<uint32_t>(paddings[i].first), static_cast<uint32_t>(shape[i]));
+                    clamped_padding[i].second = std::min(static_cast<uint32_t>(paddings[i].second), static_cast<uint32_t>(shape[i]));
+                }
+            }
+        }
+
+        const PixelValue const_value = PixelValue(static_cast<uint32_t>(0));
+
+        TensorShape output_shape = arm_compute::misc::shape_calculator::compute_padded_shape(shape, clamped_padding);
+
+        // Create tensors
+        src = create_tensor<TensorType>(shape, data_type);
+        dst = create_tensor<TensorType>(output_shape, data_type);
+
+        // Create and configure function
+        pad_layer.configure(&src, &dst, clamped_padding, const_value, mode);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+    }
+
+    void run()
+    {
+        pad_layer.run();
+    }
+
+    void sync()
+    {
+        sync_if_necessary<TensorType>();
+        sync_tensor_if_necessary<TensorType>(dst);
+    }
+
+    void teardown()
+    {
+        src.allocator()->free();
+        dst.allocator()->free();
+    }
+
+private:
+    TensorType src{};
+    TensorType dst{};
+    Function   pad_layer{};
+};
+} // namespace benchmark
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_PADLAYERFIXTURE */