From 8c571692a8236be8605a753e231d240094428be5 Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Fri, 5 Apr 2019 11:29:52 +0100
Subject: COMPMID-2062 Rework CL ConcatenateLayer

Change-Id: I5a60cd0e822b8912132a6785057921bbf6ef8f7f
Signed-off-by: Michalis Spyrou
Reviewed-on: https://review.mlplatform.org/c/951
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
---
 .../runtime/CL/functions/CLConcatenateLayer.h      |  10 +-
 .../runtime/CL/functions/CLDepthConcatenateLayer.h |   4 +-
 .../runtime/CL/functions/CLWidthConcatenateLayer.h |   4 +-
 src/runtime/CL/functions/CLConcatenateLayer.cpp    | 210 ++++++++++++---------
 tests/validation/CL/DepthConcatenateLayer.cpp      |  60 +++---
 5 files changed, 166 insertions(+), 122 deletions(-)

diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index 25a0153f51..5cf09c8ee0 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -77,13 +77,9 @@ public:
     void run() override;
 
 private:
-    void configure_h_concatenate(std::vector<ICLTensor *> inputs_vector, ICLTensor *output);
-    static Status validate_h_concatenate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
-    std::unique_ptr<IFunction>                        _concat_function;
-    std::unique_ptr<CLHeightConcatenateLayerKernel[]> _hconcat_kernels;
-    unsigned int                                      _num_inputs;
-    unsigned int                                      _axis;
+    std::vector<std::unique_ptr<ICLKernel>> _concat_kernels;
+    unsigned int                            _num_inputs;
+    unsigned int                            _axis;
 };
 }
 #endif /* __ARM_COMPUTE_CLCONCATENATELAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
index aef5d63654..33f751f6db 100644
--- a/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -40,6 +40,8 @@ namespace arm_compute
 class ICLTensor;
 
 /** Basic function to execute concatenate tensors along z axis. This function calls the following kernels:
+ *
+ * @deprecated This function is deprecated and will be removed in release 19.08
  *
 * -# @ref CLFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
 * -# @ref CLDepthConcatenateLayerKernel
diff --git a/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
index 55b65dadc4..a87ec3094c 100644
--- a/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 ARM Limited.
+ * Copyright (c) 2018-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,6 +41,8 @@ namespace arm_compute
 class ICLTensor;
 
 /** Basic function to execute concatenate tensors along x axis. This function calls the following kernel:
+ *
+ * @deprecated This function is deprecated and will be removed in release 19.08
  *
 * -# @ref CLWidthConcatenateLayerKernel
 * -# @ref CLWidthConcatenate2TensorsKernel (if there are exactly 2 input tensors)
diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp
index 13164fdff9..7edea3efac 100644
--- a/src/runtime/CL/functions/CLConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp
@@ -38,39 +38,16 @@ namespace arm_compute
 {
 CLConcatenateLayer::CLConcatenateLayer()
-    : _concat_function(nullptr),
-      _hconcat_kernels(),
+    : _concat_kernels(),
       _num_inputs(0),
       _axis(Window::DimX)
 {
 }
 
-Status CLConcatenateLayer::validate_h_concatenate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output) // NOLINT
-{
-    const unsigned int num_inputs = inputs_vector.size();
-
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
-    ARM_COMPUTE_RETURN_ERROR_ON(num_inputs < 2);
-
-    // Output auto inizialitation if not yet initialized
-    TensorInfo        tmp_output_info = *output->clone();
-    const TensorShape output_shape    = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY);
-    auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
-
-    unsigned int height_offset = 0;
-    // Validate generic case of WidthConcatenate kernel
-    for(const auto &input : inputs_vector)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-        ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, height_offset, &tmp_output_info));
-        height_offset += input->dimension(Window::DimY);
-    }
-
-    return Status{};
-}
-
-void CLConcatenateLayer::configure_h_concatenate(std::vector<ICLTensor *> inputs_vector, ICLTensor *output) // NOLINT
+void CLConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, DataLayoutDimension axis)
 {
+    ARM_COMPUTE_ERROR_ON(output == nullptr);
+    _axis       = get_data_layout_dimension_index(output->info()->data_layout(), axis);
     _num_inputs = inputs_vector.size();
 
     std::vector<ITensorInfo *> inputs_vector_info(inputs_vector.size());
@@ -79,103 +56,166 @@ void CLConcatenateLayer::configure_h_concatenate(std::vector<ICLTensor *> inputs
         ARM_COMPUTE_ERROR_ON_NULLPTR(t);
         return t->info();
     });
-
-    const TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY);
+    TensorShape output_shape{};
+    if(_axis == Window::DimZ)
+    {
+        output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
+    }
+    else
+    {
+        output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+    }
 
     // Output auto inizialitation if not yet initialized
     auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
+    ARM_COMPUTE_ERROR_THROW_ON(CLConcatenateLayer::validate(inputs_vector_info, output->info(), axis));
 
-    ARM_COMPUTE_ERROR_THROW_ON(CLConcatenateLayer::validate_h_concatenate(inputs_vector_info, output->info()));
-
-    // Configure generic case WidthConcatenate kernels
-    _hconcat_kernels = arm_compute::support::cpp14::make_unique<CLHeightConcatenateLayerKernel[]>(_num_inputs);
-
-    unsigned int height_offset = 0;
-    unsigned int i             = 0;
-    std::transform(inputs_vector.begin(), inputs_vector.end(), inputs_vector.begin(), [&](ICLTensor * t)
-    {
-        auto &kernel = _hconcat_kernels[i++];
-        kernel.configure(t, height_offset, output);
-        height_offset += t->info()->dimension(Window::DimY);
-        return t;
-    });
-}
-
-void CLConcatenateLayer::configure(const std::vector<ICLTensor *> &inputs_vector, ICLTensor *output, DataLayoutDimension axis)
-{
-    ARM_COMPUTE_ERROR_ON(output == nullptr);
-    _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
+    unsigned int offset = 0;
     switch(_axis)
     {
-        case 0:
+        case Window::DimX:
         {
-            auto func = support::cpp14::make_unique<CLWidthConcatenateLayer>();
-            func->configure(inputs_vector, output);
-            _concat_function = std::move(func);
+            switch(_num_inputs)
+            {
+                case 2:
+                {
+                    // Configure WidthConcatenate2Tensors kernel
+                    auto kernel = support::cpp14::make_unique<CLWidthConcatenate2TensorsKernel>();
+                    kernel->configure(inputs_vector.at(0), inputs_vector.at(1), output);
+                    _concat_kernels.emplace_back(std::move(kernel));
+                    break;
+                }
+                case 4:
+                {
+                    // Configure WidthConcatenate4Tensors kernel
+                    auto kernel = support::cpp14::make_unique<CLWidthConcatenate4TensorsKernel>();
+                    kernel->configure(inputs_vector.at(0), inputs_vector.at(1), inputs_vector.at(2), inputs_vector.at(3), output);
+                    _concat_kernels.emplace_back(std::move(kernel));
+                    break;
+                }
+                default:
+                {
+                    // Configure generic case WidthConcatenate kernels
+                    for(unsigned int i = 0; i < _num_inputs; ++i)
+                    {
+                        auto kernel = support::cpp14::make_unique<CLWidthConcatenateLayerKernel>();
+                        kernel->configure(inputs_vector.at(i), offset, output);
+                        offset += inputs_vector.at(i)->info()->dimension(_axis);
+                        _concat_kernels.emplace_back(std::move(kernel));
+                    }
+                    break;
+                }
+            }
             break;
         }
-        case 1:
+        case Window::DimY:
         {
-            configure_h_concatenate(inputs_vector, output);
+            for(unsigned int i = 0; i < _num_inputs; ++i)
+            {
+                auto kernel = support::cpp14::make_unique<CLHeightConcatenateLayerKernel>();
+                kernel->configure(inputs_vector.at(i), offset, output);
+                offset += inputs_vector.at(i)->info()->dimension(_axis);
+                _concat_kernels.emplace_back(std::move(kernel));
+            }
             break;
         }
-        case 2:
+        case Window::DimZ:
         {
-            auto func = support::cpp14::make_unique<CLDepthConcatenateLayer>();
-            func->configure(inputs_vector, output);
-            _concat_function = std::move(func);
+            for(unsigned int i = 0; i < _num_inputs; ++i)
+            {
+                auto kernel = support::cpp14::make_unique<CLDepthConcatenateLayerKernel>();
+                kernel->configure(inputs_vector.at(i), offset, output);
+                offset += inputs_vector.at(i)->info()->dimension(_axis);
+                _concat_kernels.emplace_back(std::move(kernel));
+            }
             break;
         }
         default:
-            ARM_COMPUTE_ERROR("Concatenation is supported across width, height and depth only!");
+            ARM_COMPUTE_ERROR("Axis not supported");
     }
 }
 
 Status CLConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis)
 {
     ARM_COMPUTE_RETURN_ERROR_ON(output == nullptr);
+    const unsigned int num_inputs = inputs_vector.size();
 
-    switch(get_data_layout_dimension_index(output->data_layout(), axis))
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_RETURN_ERROR_ON(num_inputs < 2);
+    const unsigned int _axis = get_data_layout_dimension_index(inputs_vector[0]->data_layout(), axis);
+
+    // Output auto inizialitation if not yet initialized
+    TensorInfo  tmp_output_info = *output->clone();
+    TensorShape output_shape{};
+    if(_axis == Window::DimZ)
     {
-        case 0:
-            ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayer::validate(inputs_vector, output));
-            break;
-        case 1:
-            ARM_COMPUTE_RETURN_ON_ERROR(CLConcatenateLayer::validate_h_concatenate(inputs_vector, output));
-            break;
-        case 2:
-            ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayer::validate(inputs_vector, output));
-            break;
-        default:
-            ARM_COMPUTE_RETURN_ERROR_MSG("Concatenation is supported across width and depth only!");
+        output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
     }
-    return Status{};
-}
+    else
+    {
+        output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+    }
+    auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
 
-void CLConcatenateLayer::run()
-{
+    unsigned int offset = 0;
     switch(_axis)
     {
-        case 0:
-        case 2:
+        case Window::DimX:
         {
-            ARM_COMPUTE_ERROR_ON(_concat_function == nullptr);
-            _concat_function->run();
+            switch(num_inputs)
+            {
+                case 2:
+                    // Validate WidthConcatenate2Tensors kernels if there are 2 inputs
+                    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1]);
+                    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate2TensorsKernel::validate(inputs_vector[0], inputs_vector[1], &tmp_output_info));
+                    break;
+                case 4:
+                    // Validate WidthConcatenate4Tensors kernels if there are 4 inputs
+                    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3]);
+                    ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenate4TensorsKernel::validate(inputs_vector[0], inputs_vector[1], inputs_vector[2], inputs_vector[3], &tmp_output_info));
+                    break;
+                default:
+                    // Validate generic case of WidthConcatenate kernel
+                    for(const auto &input : inputs_vector)
+                    {
+                        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
+                        ARM_COMPUTE_RETURN_ON_ERROR(CLWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                        offset += input->dimension(_axis);
+                    }
+                    break;
+            }
             break;
         }
-        case 1:
+        case Window::DimY:
         {
-            for(unsigned int i = 0; i < _num_inputs; ++i)
+            for(const auto &input : inputs_vector)
             {
-                CLScheduler::get().enqueue(_hconcat_kernels[i], true);
+                ARM_COMPUTE_RETURN_ON_ERROR(CLHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                offset += input->dimension(_axis);
             }
             break;
         }
-        default:
+        case Window::DimZ:
         {
-            ARM_COMPUTE_ERROR("Axis not supported");
+            for(const auto &input : inputs_vector)
+            {
+                ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+                offset += input->dimension(_axis);
+            }
             break;
         }
+        default:
+            ARM_COMPUTE_ERROR("Axis not supported");
+    }
+
+    return Status{};
+}
+
+void CLConcatenateLayer::run()
+{
+    for(auto &kernel : _concat_kernels)
+    {
+        CLScheduler::get().enqueue(*kernel, true);
     }
 }
 } // namespace arm_compute
diff --git a/tests/validation/CL/DepthConcatenateLayer.cpp b/tests/validation/CL/DepthConcatenateLayer.cpp
index 01477f9fc3..f4a693ca7d 100644
--- a/tests/validation/CL/DepthConcatenateLayer.cpp
+++ b/tests/validation/CL/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,14 +24,14 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLDepthConcatenateLayer.h"
+#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
 #include "tests/CL/CLAccessor.h"
 #include "tests/datasets/ShapeDatasets.h"
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
 #include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DepthConcatenateLayerFixture.h"
+#include "tests/validation/fixtures/ConcatenateLayerFixture.h"
 
 namespace arm_compute
 {
@@ -73,8 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         inputs_vector_info_raw.emplace_back(&input);
     }
 
-    bool is_valid = bool(CLDepthConcatenateLayer::validate(inputs_vector_info_raw,
-                                                           &output_info.clone()->set_is_resizable(false)));
+    bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::CHANNEL));
     ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
@@ -83,37 +82,38 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
 TEST_CASE(Configuration, framework::DatasetMode::ALL)
 {
     // Create tensors
-    CLTensor src1 = create_tensor<CLTensor>(TensorShape(32U, 32U, 128U), DataType::F32, 1);
+    CLTensor src1 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
     CLTensor src2 = create_tensor<CLTensor>(TensorShape(32U, 32U, 32U), DataType::F32, 1);
+    CLTensor src3 = create_tensor<CLTensor>(TensorShape(16U, 32U, 32U), DataType::F32, 1);
     CLTensor dst;
 
     ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(src3.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
     // Create and configure function
-    CLDepthConcatenateLayer concat_layer;
+    CLConcatenateLayer concat_layer;
 
-    concat_layer.configure({ &src1, &src2 }, &dst);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(TensorShape(32U, 32U, 160U));
-    validate(dst.info()->valid_region(), valid_region);
+    concat_layer.configure({ &src1, &src2, &src3 }, &dst, DataLayoutDimension::CHANNEL);
 }
-
 template <typename T>
-using CLDepthConcatenateLayerFixture = DepthConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLDepthConcatenateLayer, T>;
+using CLDepthConcatenateLayerFixture = ConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLConcatenateLayer, T>;
 
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
-                                                                                                                   DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                                                                                                           framework::dataset::make("DataType",
+                                                                                                                                   DataType::F16)),
+                                                                                                                   framework::dataset::make("Axis", 2)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
-                                                                                                                 DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Small4DShapes()),
+                                                                                                                         framework::dataset::make("DataType",
+                                                                                                                                 DataType::F16)),
+                                                                                                                 framework::dataset::make("Axis", 2)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
@@ -121,14 +121,17 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<half>, framework
 TEST_SUITE_END()
 
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
-                                                                                                                    DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                                                                                                            framework::dataset::make("DataType",
+                                                                                                                                    DataType::F32)),
+                                                                                                                    framework::dataset::make("Axis", 2)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
-                                                                                                                  DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+                                                                                                                  DataType::F32)),
+                                                                                                                  framework::dataset::make("Axis", 2)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
@@ -136,17 +139,19 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framewor
 TEST_SUITE_END()
 TEST_SUITE_END()
-
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
-                                                                                                                      DataType::QASYMM8)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+                                                                                                                              framework::dataset::make("DataType",
+                                                                                                                                      DataType::QASYMM8)),
+                                                                                                                      framework::dataset::make("Axis", 2)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
-                                                                                                                    DataType::QASYMM8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+                                                                                                                    DataType::QASYMM8)),
+                                                                                                                    framework::dataset::make("Axis", 2)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference);
 }
@@ -154,7 +159,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<uint8_t>, framew
 TEST_SUITE_END()
 TEST_SUITE_END()
-
 TEST_SUITE_END()
 TEST_SUITE_END()
 } // namespace validation
-- 
cgit v1.2.1
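
After this rework, callers drive all three concatenation axes through the single CLConcatenateLayer function, passing the axis as a DataLayoutDimension, in the same way the Configuration test above does. The following is a minimal usage sketch only: the tensor shapes, the CLScheduler::default_init()/allocate() boilerplate and the main() wrapper are illustrative assumptions and are not part of the patch, while the configure()/run() calls follow the API shown in the diff.

// Minimal usage sketch (assumed boilerplate, not from the patch): concatenate three
// NCHW F32 tensors along the channel axis with the reworked CLConcatenateLayer.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

using namespace arm_compute;

int main()
{
    // Create the default CL context and queue used by the scheduler
    CLScheduler::get().default_init();

    // Inputs match in width/height and differ only in the channel dimension
    CLTensor src1, src2, src3, dst;
    src1.allocator()->init(TensorInfo(TensorShape(32U, 32U, 128U), 1, DataType::F32));
    src2.allocator()->init(TensorInfo(TensorShape(32U, 32U, 32U), 1, DataType::F32));
    src3.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));

    // The axis is a DataLayoutDimension; dst is auto-initialised to 32x32x176
    // during configure(), which selects the depth-concatenate kernels internally
    CLConcatenateLayer concat;
    concat.configure({ &src1, &src2, &src3 }, &dst, DataLayoutDimension::CHANNEL);

    // Allocate backing CL buffers, then enqueue the configured kernels
    src1.allocator()->allocate();
    src2.allocator()->allocate();
    src3.allocator()->allocate();
    dst.allocator()->allocate();

    concat.run();
    CLScheduler::get().sync();
    return 0;
}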