From 55b3d1216b4011d86d5f06335e518dc924987ae5 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 9 May 2018 09:59:23 +0100
Subject: COMPMID-1137 OpenCL concatenate width

Change-Id: I40faba421281b1cf080fa6a825d04a4366cdaeb0
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130700
Reviewed-by: Anthony Barbier
Tested-by: Jenkins
Reviewed-by: Georgios Pinitas
---
 tests/validation/CL/WidthConcatenateLayer.cpp | 176 ++++++++++++++++++++++++++
 1 file changed, 176 insertions(+)
 create mode 100644 tests/validation/CL/WidthConcatenateLayer.cpp

(limited to 'tests/validation/CL/WidthConcatenateLayer.cpp')

diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp
new file mode 100644
index 0000000000..0ff95df957
--- /dev/null
+++ b/tests/validation/CL/WidthConcatenateLayer.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/WidthConcatenateLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(WidthConcatenateLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+    framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching y dimension
+                                             TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching total width
+                                             TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0)
+    }),
+    framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32, 0),
+                                             TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0),
+                                             TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0),
+                                             TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0)
+    })),
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16, 0),
+                                             TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32, 0),
+                                             TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32, 0),
+                                             TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32, 0)
+    })),
+    framework::dataset::make("Expected", { false, false, false, true })),
+    input_info1, input_info2, output_info, expected)
+{
+    std::vector<TensorInfo> inputs_vector_info;
+    inputs_vector_info.emplace_back(std::move(input_info1));
+    inputs_vector_info.emplace_back(std::move(input_info2));
+
+    std::vector<ITensorInfo *> inputs_vector_info_raw;
+    for(auto &input : inputs_vector_info)
+    {
+        inputs_vector_info_raw.emplace_back(&input);
+    }
+
+    bool is_valid = bool(CLWidthConcatenateLayer::validate(inputs_vector_info_raw,
+                                                           &output_info.clone()->set_is_resizable(false)));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_CASE(Configuration, framework::DatasetMode::ALL)
+{
+    // Create tensors
+    CLTensor src1 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+    CLTensor src2 = create_tensor<CLTensor>(TensorShape(32U, 32U, 32U), DataType::F32, 1);
+    CLTensor src3 = create_tensor<CLTensor>(TensorShape(15U, 32U, 32U), DataType::F32, 1);
+    CLTensor dst;
+
+    ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(src3.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLWidthConcatenateLayer concat_layer;
+
+    concat_layer.configure({ &src1, &src2, &src3 }, &dst);
+}
+
+template <typename T>
+using CLWidthConcatenateLayerFixture = WidthConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLWidthConcatenateLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+                                                                                                                  DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
+                                                                                                                DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+                                                                                                                   DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(), framework::dataset::make("DataType",
+                                                                                                                 DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
+                                                                                                                   framework::dataset::make("DataType",
+                                                                                                                                            DataType::QS8)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(),
+                                                                                                                  framework::dataset::make("DataType",
+                                                                                                                                           DataType::QS8)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
+                                                                                                                    framework::dataset::make("DataType",
+                                                                                                                                             DataType::QS16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(),
+                                                                                                                   framework::dataset::make("DataType",
+                                                                                                                                            DataType::QS16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
--
cgit v1.2.1