author    Michalis Spyrou <michalis.spyrou@arm.com>  2019-04-03 19:48:54 +0100
committer Michalis Spyrou <michalis.spyrou@arm.com>  2019-04-05 10:24:49 +0000
commit    9f15c5e838085294ce391d3bdcf5ec7752650ea8 (patch)
tree      c3abb2d06998ffb0a1bf3cb7da52359d6c2d562d
parent    7191aaa184992ca1fa8bdbbe5a9d8f9cd093ad5b (diff)
COMPMID-2062 Rework NEON ConcatenateLayer
Change-Id: I2703c99f651e4f06c4e44e39a85a8a8d201c5362
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/944
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
 arm_compute/runtime/NEON/functions/NEConcatenateLayer.h      |  10
 arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h |   4
 arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h |   2
 src/runtime/NEON/functions/NEConcatenateLayer.cpp            | 174
 tests/validation/NEON/DepthConcatenateLayer.cpp              |  66
 tests/validation/reference/ConcatenateLayer.cpp              |   9
 6 files changed, 126 insertions(+), 139 deletions(-)
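
After the rework, NEConcatenateLayer is configured with an explicit concatenation axis and internally owns one kernel per input instead of delegating to the axis-specific functions. A minimal usage sketch of the reworked interface follows; the tensor shapes come from the removed Configuration test, while the surrounding setup and the function name are illustrative assumptions, not code from this patch.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    // Illustrative sketch only: concatenate two F32 tensors along the channel axis.
    void example_channel_concatenation()
    {
        // Two inputs that differ only in depth, as in the removed Configuration test.
        Tensor src0, src1, dst;
        src0.allocator()->init(TensorInfo(TensorShape(32U, 32U, 128U), 1, DataType::F32));
        src1.allocator()->init(TensorInfo(TensorShape(32U, 32U, 32U), 1, DataType::F32));
        // dst is left empty on purpose: configure() auto-initialises it to 32x32x160.

        NEConcatenateLayer concat;
        concat.configure({ &src0, &src1 }, &dst, DataLayoutDimension::CHANNEL);

        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src0 and src1 ...
        concat.run(); // schedules one kernel per input along the concatenation axis
    }
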
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
index b70d6ebc7c..7dfbcf9199 100644
--- a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -77,13 +77,9 @@ public:
void run() override;
private:
- void configure_h_concatenate(std::vector<ITensor *> inputs_vector, ITensor *output);
- static Status validate_h_concatenate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output);
-
- std::unique_ptr<IFunction> _concat_function;
- std::unique_ptr<NEHeightConcatenateLayerKernel[]> _hconcat_kernels;
- unsigned int _num_inputs;
- unsigned int _axis;
+ std::vector<std::unique_ptr<INEKernel>> _concat_kernels;
+ unsigned int _num_inputs;
+ unsigned int _axis;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_NECONCATENATELAYER_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index e2162ef042..da38151e73 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,6 +41,8 @@ class ITensor;
* -# @ref NEFillBorderKernel (executed if input's lowest two dimensions are smaller than respective output's dimensions)
* -# @ref NEDepthConcatenateLayerKernel
*
+ * @deprecated This function is deprecated and will be removed in release 19.08
+ *
*/
class NEDepthConcatenateLayer : public IFunction
{
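
With NEDepthConcatenateLayer now marked deprecated, callers are expected to move to the generic NEConcatenateLayer with an explicit axis. A hedged migration sketch is shown below; the wrapper name is hypothetical and not part of this change.

    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

    #include <vector>

    // Hypothetical wrapper: the deprecated
    //     NEDepthConcatenateLayer::configure(inputs, output)
    // becomes a CHANNEL-axis call on the generic function. For the default NCHW
    // layout the channel axis maps to dimension 2, i.e. NEDepthConcatenateLayerKernel.
    void configure_depth_concat(const std::vector<arm_compute::ITensor *> &inputs,
                                arm_compute::ITensor                      *output,
                                arm_compute::NEConcatenateLayer           &concat)
    {
        concat.configure(inputs, output, arm_compute::DataLayoutDimension::CHANNEL);
    }
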
diff --git a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
index 70a81b2788..0aaba7987f 100644
--- a/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEWidthConcatenateLayer.h
@@ -44,6 +44,8 @@ class ITensor;
/** Basic function to execute concatenate tensors along x axis. This function calls the following kernel:
*
* -# @ref NEWidthConcatenateLayerKernel
+ *
+ * @deprecated This function is deprecated and will be removed in release 19.08
*/
class NEWidthConcatenateLayer : public IFunction
{
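
NEWidthConcatenateLayer carries the same deprecation note; under the generic function the width case is only a different axis argument. Again a hypothetical wrapper, not code from this patch.

    #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

    #include <vector>

    // Hypothetical drop-in for the deprecated NEWidthConcatenateLayer::configure():
    // for the default NCHW layout the WIDTH axis maps to dimension 0, i.e.
    // NEWidthConcatenateLayerKernel.
    void configure_width_concat(const std::vector<arm_compute::ITensor *> &inputs,
                                arm_compute::ITensor                      *output,
                                arm_compute::NEConcatenateLayer           &concat)
    {
        concat.configure(inputs, output, arm_compute::DataLayoutDimension::WIDTH);
    }
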
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index 1897915d33..fa7b91c3ca 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -38,36 +38,16 @@
namespace arm_compute
{
NEConcatenateLayer::NEConcatenateLayer()
- : _concat_function(nullptr),
- _hconcat_kernels(),
+ : _concat_kernels(),
_num_inputs(0),
_axis(Window::DimX)
{
}
-Status NEConcatenateLayer::validate_h_concatenate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output)
-{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
- ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
-
- // Output auto inizialitation if not yet initialized
- TensorInfo tmp_output_info = *output->clone();
- TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY);
- auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
-
- unsigned int offset = 0;
- for(const auto &input : inputs_vector)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
- ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
- offset += input->dimension(Window::DimY);
- }
-
- return Status{};
-}
-
-void NEConcatenateLayer::configure_h_concatenate(std::vector<ITensor *> inputs_vector, ITensor *output)
+void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis)
{
+ ARM_COMPUTE_ERROR_ON(output == nullptr);
+ _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
_num_inputs = inputs_vector.size();
std::vector<ITensorInfo *> inputs_vector_info;
@@ -76,98 +56,108 @@ void NEConcatenateLayer::configure_h_concatenate(std::vector<ITensor *> inputs_v
ARM_COMPUTE_ERROR_ON_NULLPTR(inputs_vector.at(i));
inputs_vector_info.emplace_back(inputs_vector.at(i)->info());
}
- TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, Window::DimY);
+ TensorShape output_shape{};
+ if(_axis == Window::DimZ)
+ {
+ output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
+ }
+ else
+ {
+ output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+ }
// Output auto inizialitation if not yet initialized
auto_init_if_empty(*output->info(), output_shape, 1, inputs_vector[0]->info()->data_type());
- ARM_COMPUTE_ERROR_THROW_ON(validate_h_concatenate(inputs_vector_info, output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(NEConcatenateLayer::validate(inputs_vector_info, output->info(), axis));
unsigned int offset = 0;
- _hconcat_kernels = arm_compute::support::cpp14::make_unique<NEHeightConcatenateLayerKernel[]>(_num_inputs);
-
for(unsigned int i = 0; i < _num_inputs; ++i)
{
- _hconcat_kernels[i].configure(inputs_vector.at(i), offset, output);
- offset += inputs_vector.at(i)->info()->dimension(Window::DimY);
- }
-}
-
-void NEConcatenateLayer::configure(const std::vector<ITensor *> &inputs_vector, ITensor *output, DataLayoutDimension axis)
-{
- ARM_COMPUTE_ERROR_ON(output == nullptr);
- _axis = get_data_layout_dimension_index(output->info()->data_layout(), axis);
- switch(_axis)
- {
- case 0:
+ switch(_axis)
{
- auto func = support::cpp14::make_unique<NEWidthConcatenateLayer>();
- func->configure(inputs_vector, output);
- _concat_function = std::move(func);
- break;
- }
- case 1:
- {
- configure_h_concatenate(inputs_vector, output);
- break;
- }
- case 2:
- {
- auto func = support::cpp14::make_unique<NEDepthConcatenateLayer>();
- func->configure(inputs_vector, output);
- _concat_function = std::move(func);
- break;
+ case Window::DimX:
+ {
+ auto kernel = support::cpp14::make_unique<NEWidthConcatenateLayerKernel>();
+ kernel->configure(inputs_vector.at(i), offset, output);
+ _concat_kernels.emplace_back(std::move(kernel));
+ break;
+ }
+ case Window::DimY:
+ {
+ auto kernel = support::cpp14::make_unique<NEHeightConcatenateLayerKernel>();
+ kernel->configure(inputs_vector.at(i), offset, output);
+ _concat_kernels.emplace_back(std::move(kernel));
+ break;
+ }
+ case Window::DimZ:
+ {
+ auto kernel = support::cpp14::make_unique<NEDepthConcatenateLayerKernel>();
+ kernel->configure(inputs_vector.at(i), offset, output);
+ _concat_kernels.emplace_back(std::move(kernel));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Axis not supported");
}
- default:
- ARM_COMPUTE_ERROR("Concatenation is supported across width, height and depth only!");
+ offset += inputs_vector.at(i)->info()->dimension(_axis);
}
}
Status NEConcatenateLayer::validate(const std::vector<ITensorInfo *> &inputs_vector, const ITensorInfo *output, DataLayoutDimension axis)
{
- ARM_COMPUTE_RETURN_ERROR_ON(output == nullptr);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+ ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
+ const unsigned int _axis = get_data_layout_dimension_index(inputs_vector[0]->data_layout(), axis);
- switch(get_data_layout_dimension_index(output->data_layout(), axis))
+ // Output auto inizialitation if not yet initialized
+ TensorInfo tmp_output_info = *output->clone();
+ TensorShape output_shape{};
+ if(_axis == Window::DimZ)
{
- case 0:
- ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayer::validate(inputs_vector, output));
- break;
- case 1:
- ARM_COMPUTE_RETURN_ON_ERROR(NEConcatenateLayer::validate_h_concatenate(inputs_vector, output));
- break;
- case 2:
- ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayer::validate(inputs_vector, output));
- break;
- default:
- ARM_COMPUTE_RETURN_ERROR_MSG("Concatenation is supported across width and depth only!");
+ output_shape = arm_compute::misc::shape_calculator::calculate_depth_concatenate_shape(inputs_vector);
}
- return Status{};
-}
+ else
+ {
+ output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, _axis);
+ }
+ auto_init_if_empty(tmp_output_info, output_shape, 1, inputs_vector[0]->data_type());
-void NEConcatenateLayer::run()
-{
- switch(_axis)
+ unsigned int offset = 0;
+ for(const auto &input : inputs_vector)
{
- case 0:
- case 2:
- {
- ARM_COMPUTE_ERROR_ON(_concat_function == nullptr);
- _concat_function->run();
- break;
- }
- case 1:
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
+ switch(_axis)
{
- for(unsigned i = 0; i < _num_inputs; ++i)
+ case Window::DimX:
{
- NEScheduler::get().schedule(_hconcat_kernels.get() + i, Window::DimY);
+ ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+ break;
}
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Axis not supported.");
- break;
+ case Window::DimY:
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+ break;
+ }
+ case Window::DimZ:
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, &tmp_output_info));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Axis not supported");
}
+ offset += input->dimension(_axis);
+ }
+
+ return Status{};
+}
+
+void NEConcatenateLayer::run()
+{
+ for(auto &kernel : _concat_kernels)
+ {
+ NEScheduler::get().schedule(kernel.get(), _axis);
}
}
} // namespace arm_compute
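
The reworked configure() and validate() share one pattern: walk the inputs once, hand each one a running offset along the concatenation axis, and advance that offset by the input's extent on the axis. A standalone sketch of that bookkeeping follows; it is plain C++ with made-up names, independent of the library.

    #include <vector>

    // Illustrative only: mirrors the offset bookkeeping in
    // NEConcatenateLayer::configure()/validate(), where each per-input kernel is
    // configured with the running offset along the concatenation axis.
    std::vector<unsigned int> concat_offsets(const std::vector<unsigned int> &extents_along_axis)
    {
        std::vector<unsigned int> offsets;
        offsets.reserve(extents_along_axis.size());

        unsigned int offset = 0;
        for(unsigned int extent : extents_along_axis)
        {
            offsets.push_back(offset); // where this input starts in the output
            offset += extent;          // the next input starts right after it
        }
        return offsets;
    }

    // Example: inputs with depths {128, 32} start at offsets {0, 128} in a 160-deep
    // output, matching the shapes used in the removed Configuration test.
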
diff --git a/tests/validation/NEON/DepthConcatenateLayer.cpp b/tests/validation/NEON/DepthConcatenateLayer.cpp
index 24e7649e7d..1b355ae17d 100644
--- a/tests/validation/NEON/DepthConcatenateLayer.cpp
+++ b/tests/validation/NEON/DepthConcatenateLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,7 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
@@ -31,7 +31,7 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/DepthConcatenateLayerFixture.h"
+#include "tests/validation/fixtures/ConcatenateLayerFixture.h"
namespace arm_compute
{
@@ -73,48 +73,29 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
inputs_vector_info_raw.emplace_back(&input);
}
- bool is_valid = bool(NEDepthConcatenateLayer::validate(inputs_vector_info_raw,
- &output_info.clone()->set_is_resizable(false)));
+ bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), DataLayoutDimension::CHANNEL));
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
// clang-format on
// *INDENT-ON*
-TEST_CASE(Configuration, framework::DatasetMode::ALL)
-{
- // Create tensors
- Tensor src1 = create_tensor<Tensor>(TensorShape(32U, 32U, 128U), DataType::F32, 1);
- Tensor src2 = create_tensor<Tensor>(TensorShape(32U, 32U, 32U), DataType::F32, 1);
- Tensor dst;
-
- ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Create and configure function
- NEDepthConcatenateLayer concat_layer;
-
- concat_layer.configure({ &src1, &src2 }, &dst);
-
- // Validate valid region
- const ValidRegion valid_region = shape_to_valid_region(TensorShape(32U, 32U, 160U));
- validate(dst.info()->valid_region(), valid_region);
-}
-
template <typename T>
-using NEDepthConcatenateLayerFixture = DepthConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEDepthConcatenateLayer, T>;
+using NEDepthConcatenateLayerFixture = ConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEConcatenateLayer, T>;
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small2DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("Axis", 2)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("Axis", 2)))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -123,14 +104,17 @@ TEST_SUITE_END()
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("Axis", 2)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("Axis", 2)))
{
// Validate output
validate(Accessor(_target), _reference);
@@ -140,14 +124,18 @@ TEST_SUITE_END()
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
-FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
- DataType::QASYMM8)))
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("Axis", 2)))
{
// Validate output
validate(Accessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::DepthConcatenateLayerShapes(), framework::dataset::make("DataType",
- DataType::QASYMM8)))
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("Axis", 2)))
{
// Validate output
validate(Accessor(_target), _reference);
diff --git a/tests/validation/reference/ConcatenateLayer.cpp b/tests/validation/reference/ConcatenateLayer.cpp
index 1440878829..93b1768ebd 100644
--- a/tests/validation/reference/ConcatenateLayer.cpp
+++ b/tests/validation/reference/ConcatenateLayer.cpp
@@ -114,6 +114,15 @@ SimpleTensor<T> concatenate_layer(std::vector<SimpleTensor<T>> &srcs, SimpleTens
dst = reference::permute<T>(dst, PermutationVector(1U, 0U));
return reference::permute<T>(widthconcatenate_layer(srcs, dst), PermutationVector(1U, 0U));
}
+ case Window::DimZ:
+ {
+ for(auto &t : srcs)
+ {
+ t = reference::permute<T>(t, PermutationVector(2U, 1U, 0U));
+ }
+ dst = reference::permute<T>(dst, PermutationVector(2U, 1U, 0U));
+ return reference::permute<T>(widthconcatenate_layer(srcs, dst), PermutationVector(2U, 1U, 0U));
+ }
default:
{
ARM_COMPUTE_ERROR("Not supported");
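
The reference path added for Window::DimZ reuses widthconcatenate_layer by permuting each tensor with PermutationVector(2U, 1U, 0U) so the depth axis becomes the width axis, concatenating along the new width, and permuting back. The following is a self-contained sketch of that reduction with a made-up Tensor3D type, not the library's SimpleTensor machinery; the reference also permutes the provided dst, whereas this sketch builds its own output for brevity.

    #include <vector>

    // Minimal 3-D tensor in x-fastest layout, purely for illustration.
    struct Tensor3D
    {
        unsigned int       w, h, d;
        std::vector<float> data; // w * h * d elements

        float  at(unsigned int x, unsigned int y, unsigned int z) const { return data[x + w * (y + h * z)]; }
        float &at(unsigned int x, unsigned int y, unsigned int z)       { return data[x + w * (y + h * z)]; }
    };

    // PermutationVector(2U, 1U, 0U): swap the x and z axes.
    Tensor3D permute_xz(const Tensor3D &src)
    {
        Tensor3D dst{ src.d, src.h, src.w, std::vector<float>(src.data.size()) };
        for(unsigned int z = 0; z < src.d; ++z)
            for(unsigned int y = 0; y < src.h; ++y)
                for(unsigned int x = 0; x < src.w; ++x)
                    dst.at(z, y, x) = src.at(x, y, z);
        return dst;
    }

    // Existing primitive: concatenate along x (inputs must share h and d).
    Tensor3D concatenate_width(const std::vector<Tensor3D> &srcs)
    {
        unsigned int total_w = 0;
        for(const auto &s : srcs)
        {
            total_w += s.w;
        }
        Tensor3D dst{ total_w, srcs[0].h, srcs[0].d, std::vector<float>(total_w * srcs[0].h * srcs[0].d) };

        unsigned int offset = 0;
        for(const auto &s : srcs)
        {
            for(unsigned int z = 0; z < s.d; ++z)
                for(unsigned int y = 0; y < s.h; ++y)
                    for(unsigned int x = 0; x < s.w; ++x)
                        dst.at(offset + x, y, z) = s.at(x, y, z);
            offset += s.w;
        }
        return dst;
    }

    // Depth concatenation expressed through the width primitive, as in the DimZ case:
    // permute, concatenate along the new width, permute back.
    Tensor3D concatenate_depth(const std::vector<Tensor3D> &srcs)
    {
        std::vector<Tensor3D> permuted;
        for(const auto &s : srcs)
        {
            permuted.push_back(permute_xz(s));
        }
        return permute_xz(concatenate_width(permuted));
    }
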