From 338595bca8ab60492f10626860acb1ab3722b1ce Mon Sep 17 00:00:00 2001
From: Vidhya Sudhan Loganathan
Date: Fri, 28 Jun 2019 14:09:53 +0100
Subject: COMPMID-2234 : Add support for axis 3 in NE/CLConcatenateLayer

Change-Id: Ic86f89ece3afe72809bc69c6de6fee7d21daa1d4
Signed-off-by: Vidhya Sudhan Loganathan
Reviewed-on: https://review.mlplatform.org/c/1440
Comments-Addressed: Arm Jenkins
Reviewed-by: Gian Marco Iodice
Tested-by: Arm Jenkins
---
 tests/validation/CL/BatchConcatenateLayer.cpp   | 170 ++++++++++++++++++++++++
 tests/validation/NEON/BatchConcatenateLayer.cpp | 154 +++++++++++++++++++++
 tests/validation/reference/ConcatenateLayer.cpp |  10 ++
 3 files changed, 334 insertions(+)
 create mode 100644 tests/validation/CL/BatchConcatenateLayer.cpp
 create mode 100644 tests/validation/NEON/BatchConcatenateLayer.cpp

(limited to 'tests')

diff --git a/tests/validation/CL/BatchConcatenateLayer.cpp b/tests/validation/CL/BatchConcatenateLayer.cpp
new file mode 100644
index 0000000000..b789569155
--- /dev/null
+++ b/tests/validation/CL/BatchConcatenateLayer.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ConcatenateLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(BatchConcatenateLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+    framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Mismatching data type input/output
+                                             TensorInfo(TensorShape(20U, 27U, 4U, 4U), 1, DataType::F32), // Mismatching x dimension
+                                             TensorInfo(TensorShape(23U, 26U, 4U, 3U), 1, DataType::F32), // Mismatching y dim
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 3U), 1, DataType::F32), // Mismatching z dim
+                                             TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+    }),
+    framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 3U, 3U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+    })),
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(23U, 12U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 20U, 4U, 3U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(16U, 27U, 3U, 12U), 1, DataType::F32)
+    })),
+    framework::dataset::make("Expected", { false, false, false, false, true })),
+    input_info1, input_info2, output_info, expected)
+{
+    std::vector<TensorInfo> inputs_vector_info;
+    inputs_vector_info.emplace_back(std::move(input_info1));
+    inputs_vector_info.emplace_back(std::move(input_info2));
+
+    std::vector<ITensorInfo *> inputs_vector_info_raw;
+    inputs_vector_info_raw.reserve(inputs_vector_info.size());
+    for(auto &input : inputs_vector_info)
+    {
+        inputs_vector_info_raw.emplace_back(&input);
+    }
+
+    bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 3));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_CASE(Configuration, framework::DatasetMode::ALL)
+{
+    // Create tensors
+    CLTensor src1 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+    CLTensor src2 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+    CLTensor src3 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+    CLTensor dst;
+
+    ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(src3.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLConcatenateLayer concat_layer;
+
+    concat_layer.configure({ &src1, &src2, &src3 }, &dst, 3);
+}
+template <typename T>
+using CLBatchConcatenateLayerFixture = ConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLConcatenateLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                       framework::dataset::make("DataType", DataType::F16)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(concat(datasets::Large3DShapes(), datasets::Small4DShapes()),
+                                       framework::dataset::make("DataType", DataType::F16)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                       framework::dataset::make("DataType", DataType::F32)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::ConcatenateLayerShapes(),
+                                       framework::dataset::make("DataType", DataType::F32)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::ConcatenateLayerShapes(),
+                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/BatchConcatenateLayer.cpp b/tests/validation/NEON/BatchConcatenateLayer.cpp
new file mode 100644
index 0000000000..f95663dbd3
--- /dev/null
+++ b/tests/validation/NEON/BatchConcatenateLayer.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ConcatenateLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(BatchConcatenateLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+    framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Mismatching data type input/output
+                                             TensorInfo(TensorShape(20U, 27U, 4U, 4U), 1, DataType::F32), // Mismatching x dimension
+                                             TensorInfo(TensorShape(23U, 26U, 4U, 3U), 1, DataType::F32), // Mismatching y dim
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 3U), 1, DataType::F32), // Mismatching z dim
+                                             TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+    }),
+    framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 3U, 3U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+    })),
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(23U, 12U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(23U, 20U, 4U, 3U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(16U, 27U, 3U, 12U), 1, DataType::F32)
+    })),
+    framework::dataset::make("Expected", { false, false, false, false, true })),
+    input_info1, input_info2, output_info, expected)
+{
+    std::vector<TensorInfo> inputs_vector_info;
+    inputs_vector_info.emplace_back(std::move(input_info1));
+    inputs_vector_info.emplace_back(std::move(input_info2));
+
+    std::vector<ITensorInfo *> inputs_vector_info_raw;
+    inputs_vector_info_raw.reserve(inputs_vector_info.size());
+    for(auto &input : inputs_vector_info)
+    {
+        inputs_vector_info_raw.emplace_back(&input);
+    }
+
+    bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 3));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using NEBatchConcatenateLayerFixture = ConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEConcatenateLayer, T>;
+
+TEST_SUITE(Float)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(concat(datasets::Small2DShapes(), datasets::Tiny4DShapes()),
+                                       framework::dataset::make("DataType", DataType::F16)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::ConcatenateLayerShapes(),
+                                       framework::dataset::make("DataType", DataType::F16)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                       framework::dataset::make("DataType", DataType::F32)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::ConcatenateLayerShapes(),
+                                       framework::dataset::make("DataType", DataType::F32)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::ConcatenateLayerShapes(),
+                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                               framework::dataset::make("Axis", 3)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/ConcatenateLayer.cpp b/tests/validation/reference/ConcatenateLayer.cpp
index 6c90d74a0f..aa74ca2474 100644
--- a/tests/validation/reference/ConcatenateLayer.cpp
+++ b/tests/validation/reference/ConcatenateLayer.cpp
@@ -127,6 +127,16 @@ SimpleTensor<T> concatenate_layer(std::vector<SimpleTensor<T>> &srcs, SimpleTens
             dst = reference::permute(dst, PermutationVector(2U, 1U, 0U));
             return reference::permute(widthconcatenate_layer(srcs, dst), PermutationVector(2U, 1U, 0U));
         }
+        case 3:
+        {
+            for(auto &t : srcs)
+            {
+                t = reference::permute(t, PermutationVector(3U, 2U, 1U, 0U));
+            }
+            dst = reference::permute(dst, PermutationVector(3U, 2U, 1U, 0U));
+            auto ret = reference::permute(widthconcatenate_layer(srcs, dst), PermutationVector(3U, 2U, 1U, 0U));
+            return ret;
+        }
         default:
         {
            ARM_COMPUTE_ERROR("Not supported");
--
cgit v1.2.1
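
For reference, the axis-3 (batch) path tested above is driven through the same public configure() interface that the Configuration test exercises. Below is a minimal usage sketch built around that test; the tensor shapes, the CLScheduler initialisation and the allocation boilerplate are illustrative assumptions of the standard arm_compute runtime pattern, not part of this patch.

// Minimal sketch: concatenating three 4D tensors along axis 3 (the batch
// dimension) with CLConcatenateLayer. Shapes are illustrative; the output
// info, including its batch size of 2 + 3 + 1 = 6, is inferred by configure().
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

using namespace arm_compute;

int main()
{
    // Standard OpenCL runtime setup.
    CLScheduler::get().default_init();

    // Three inputs that agree on width/height/channels and differ only in batch size.
    CLTensor src0, src1, src2, dst;
    src0.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U, 2U), 1, DataType::F32));
    src1.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U, 3U), 1, DataType::F32));
    src2.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U, 1U), 1, DataType::F32));

    // Configure the concatenation along axis 3, as in the Configuration test.
    CLConcatenateLayer concat;
    concat.configure({ &src0, &src1, &src2 }, &dst, 3);

    // Allocate backing memory (real code would also fill the inputs here)
    // and execute the function.
    src0.allocator()->allocate();
    src1.allocator()->allocate();
    src2.allocator()->allocate();
    dst.allocator()->allocate();
    concat.run();

    return 0;
}

The NEON path is exercised the same way, swapping in Tensor, Accessor and NEConcatenateLayer as the test fixture above does.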