From 920f2b6c2070f6328891e26538e8bcad63e2a79c Mon Sep 17 00:00:00 2001
From: Pablo Marquez Tello
Date: Wed, 27 Apr 2022 11:46:31 +0100
Subject: Add support for 2d and 3d indices for axis 0

* Partially resolves COMPMID-5055

Change-Id: Id05374b8c69e6b9ab4c2790a4de93d7172063b71
Signed-off-by: Pablo Marquez Tello
Change-Id: Ic6e2c2d1d34abbf6222c8d56859514e267447266
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7488
Tested-by: Arm Jenkins
Reviewed-by: Giorgio Arena
Comments-Addressed: Arm Jenkins
---
 arm_compute/core/utils/misc/ShapeCalculator.h |  19 +++-
 src/core/NEON/kernels/NEGatherKernel.cpp      | 129 ++++++++++++++++++++------
 src/core/NEON/kernels/NEGatherKernel.h        |  15 ++-
 tests/datasets/GatherDataset.h                |  14 ++-
 tests/validation/NEON/Gather.cpp              |  10 +-
 tests/validation/reference/Gather.cpp         |  41 +++++---
 6 files changed, 176 insertions(+), 52 deletions(-)

diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index df907c106e..aa51ad209a 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -1496,13 +1496,24 @@ inline TensorShape compute_pool3d_shape(const TensorShape &src, Pooling3dLayerIn

 inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis)
 {
-    ARM_COMPUTE_ERROR_ON(indices_shape.num_dimensions() > 1);
     ARM_COMPUTE_ERROR_ON(input_shape.num_dimensions() > 4);
     ARM_COMPUTE_ERROR_ON(actual_axis >= input_shape.num_dimensions());

-    TensorShape output_shape  = input_shape;
-    output_shape[actual_axis] = indices_shape[0];
-
+    TensorShape output_shape = input_shape;
+    if(indices_shape.num_dimensions() == 1u)
+    {
+        output_shape[actual_axis] = indices_shape[0];
+    }
+    else
+    {
+        const auto inddims{ indices_shape.num_dimensions() };
+        output_shape.shift_right(indices_shape.num_dimensions() - 1);
+        output_shape[0] = input_shape[0];
+        for(size_t idx(1); (idx - 1) < inddims; ++idx)
+        {
+            output_shape.set(actual_axis + idx, indices_shape[idx - 1], false);
+        }
+    }
     return output_shape;
 }
 } // namespace shape_calculator
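
(Editorial illustration, not part of the patch.) With multi-dimensional indices the new branch above grows the output rank by the index rank minus one: dimension 0 keeps the input's x-extent and the index extents are written after the gather axis. A minimal C++ trace of those steps for the 2D-indices case, using only the TensorShape calls visible in the hunk; the concrete sizes are taken from the new test dataset and the function name is illustrative only:

    #include "arm_compute/core/TensorShape.h"

    // Trace of the else-branch of compute_gather_shape() for
    // input (15, 15), indices (2, 12), axis 0.
    void trace_gather_shape_axis0_2d_indices()
    {
        using arm_compute::TensorShape;
        TensorShape output_shape(15U, 15U); // start from the input shape
        output_shape.shift_right(2 - 1);    // make room: existing extents move up one slot
        output_shape[0] = 15U;              // dimension 0 keeps the input x-extent
        output_shape.set(1, 2U, false);     // axis + 1 takes indices_shape[0] -> (15, 2, ...)
        output_shape.set(2, 12U, false);    // axis + 2 takes indices_shape[1] -> (15, 2, 12)
    }
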
diff --git a/src/core/NEON/kernels/NEGatherKernel.cpp b/src/core/NEON/kernels/NEGatherKernel.cpp
index 7090da8015..8d86a22b7e 100644
--- a/src/core/NEON/kernels/NEGatherKernel.cpp
+++ b/src/core/NEON/kernels/NEGatherKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -47,16 +47,21 @@ namespace
 template <typename U>
 void validate_indices(const ITensor *indices)
 {
-    for(size_t i = 0; i < indices->info()->tensor_shape()[0]; ++i)
+    auto *indices_ptr = (reinterpret_cast<U *>(indices->buffer() + indices->info()->offset_first_element_in_bytes()));
+    for(size_t i = 0; i < indices->info()->total_size(); ++i)
     {
-        ARM_COMPUTE_ERROR_ON(*(reinterpret_cast<U *>(indices->ptr_to_element(Coordinates(i)))) < 0);
+        const U index_value = indices_ptr[i];
+        ARM_COMPUTE_UNUSED(index_value);
+        if(index_value < 0)
+        {
+            ARM_COMPUTE_ERROR_ON(index_value < 0);
+        }
     }
 }

 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output);
-    ARM_COMPUTE_RETURN_ERROR_ON(indices->num_dimensions() > 1);
     ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);

     if(axis < 0)
@@ -65,6 +70,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices,
     }

     ARM_COMPUTE_RETURN_ERROR_ON(0 > axis || axis >= static_cast<int32_t>(input->num_dimensions()));
+    ARM_COMPUTE_RETURN_ERROR_ON(axis != 0 && indices->num_dimensions() > 1);
     ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);

     if(output->total_size() != 0)
@@ -86,6 +92,40 @@ NEGatherKernel::NEGatherKernel()
 {
 }

+template <typename U>
+inline void NEGatherKernel::gather_dims_0_axis(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON(_indices->info()->num_dimensions() < 2);
+    validate_indices<U>(_indices);
+
+    Window output_window{ window };
+    output_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+    Iterator output_it(_output, output_window);
+
+    const uint8_t *const in_ptr_start    = _input->buffer() + _input->info()->offset_first_element_in_bytes();
+    const uint32_t       input_stride_y  = _input->info()->strides_in_bytes()[1];
+    const uint32_t       output_stride_y = _output->info()->strides_in_bytes()[1];
+
+    const U *const dex_ptr_start = reinterpret_cast<const U *>(_indices->buffer() + _indices->info()->offset_first_element_in_bytes());
+    execute_window_loop(output_window, [&](const Coordinates & id)
+    {
+        const auto        new_index = *(dex_ptr_start + id.y() + id.z() * _output->info()->tensor_shape()[1] + id[3] * _indices->info()->tensor_shape()[1] * _indices->info()->tensor_shape()[0]);
+        U                *out_ptr   = reinterpret_cast<U *>(output_it.ptr());
+        const char *const in_ptr    = reinterpret_cast<const char *>(in_ptr_start + new_index * input_stride_y);
+        memcpy(out_ptr, in_ptr, output_stride_y);
+    },
+    output_it);
+}
+
+template <typename U>
+inline void NEGatherKernel::gather_dims_n_axis(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_UNUSED(window);
+    ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
+}
+
 template <typename U>
 inline void NEGatherKernel::gather_0_axis(const Window &window, const ThreadInfo &info)
 {
@@ -147,38 +187,75 @@ void NEGatherKernel::configure(const ITensor *input, const ITensor *indices, ITe
     }
     ARM_COMPUTE_ERROR_ON(0 > _axis || _axis >= static_cast<int32_t>(input->info()->num_dimensions()));

-    if(0 == _axis)
+    if(indices->info()->num_dimensions() == 1u)
     {
-        switch(_indices->info()->data_type())
+        if(0 == _axis)
+        {
+            switch(_indices->info()->data_type())
+            {
+                case DataType::U32:
+                    _func = &NEGatherKernel::gather_0_axis<uint32_t>;
+                    break;
+                case DataType::S32:
+                    _func = &NEGatherKernel::gather_0_axis<int32_t>;
+                    break;
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+                    break;
+            }
+        }
+        else
         {
-            case DataType::U32:
-                _func = &NEGatherKernel::gather_0_axis<uint32_t>;
-                break;
-            case DataType::S32:
-                _func = &NEGatherKernel::gather_0_axis<int32_t>;
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Not supported");
-                break;
+            switch(_indices->info()->data_type())
+            {
+                case DataType::U32:
+                    _func = &NEGatherKernel::gather_n_axis<uint32_t>;
+                    break;
+                case DataType::S32:
+                    _func = &NEGatherKernel::gather_n_axis<int32_t>;
+                    break;
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+                    break;
+            }
         }
     }
     else
     {
-        switch(_indices->info()->data_type())
+        if(0 == _axis)
         {
-            case DataType::U32:
-                _func = &NEGatherKernel::gather_n_axis<uint32_t>;
-                break;
-            case DataType::S32:
-                _func = &NEGatherKernel::gather_n_axis<int32_t>;
-                break;
-            default:
-                ARM_COMPUTE_ERROR("Not supported");
-                break;
+            switch(_indices->info()->data_type())
+            {
+                case DataType::U32:
+                    _func = &NEGatherKernel::gather_dims_0_axis<uint32_t>;
+                    break;
+                case DataType::S32:
+                    _func = &NEGatherKernel::gather_dims_0_axis<int32_t>;
+                    break;
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+                    break;
+            }
+        }
+        else
+        {
+            switch(_indices->info()->data_type())
+            {
+                case DataType::U32:
+                    _func = &NEGatherKernel::gather_dims_n_axis<uint32_t>;
+                    break;
+                case DataType::S32:
+                    _func = &NEGatherKernel::gather_dims_n_axis<int32_t>;
+                    break;
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+                    break;
+            }
         }
     }
+
     // Output auto initialization if not yet initialized
-    TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->info()->tensor_shape(), indices->info()->tensor_shape(), _axis);
+    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->info()->tensor_shape(), indices->info()->tensor_shape(), _axis);
     auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));

     // Create window
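
(Editorial illustration, not part of the patch.) For axis 0 with 2D/3D indices, gather_dims_0_axis() above copies one whole input row per output row: the output's y/z/w coordinates are flattened into a position in the index tensor, and the row number found there selects the input row to memcpy. A plain scalar sketch of the same row-gather semantics, independent of the Window/Iterator machinery; the helper name and element types are hypothetical:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Row-wise gather on axis 0 with a 2D index matrix, written as plain loops.
    // src     : rows of row_len floats each, stored row-major
    // indices : idx_w x idx_h matrix of row numbers, stored row-major
    // returns : one copied input row per index value (idx_w * idx_h rows)
    std::vector<float> gather_axis0_2d_indices(const std::vector<float> &src, size_t row_len,
                                               const std::vector<int32_t> &indices, size_t idx_w, size_t idx_h)
    {
        std::vector<float> dst(idx_w * idx_h * row_len);
        for(size_t i = 0; i < idx_w * idx_h; ++i) // i flattens the (y, z) output coordinate
        {
            const auto row_to_copy = static_cast<size_t>(indices[i]);
            std::copy_n(src.begin() + row_to_copy * row_len, row_len, dst.begin() + i * row_len);
        }
        return dst;
    }

The reference implementation further down in this patch (tests/validation/reference/Gather.cpp) takes the same row-copy approach via std::copy_n.
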
diff --git a/src/core/NEON/kernels/NEGatherKernel.h b/src/core/NEON/kernels/NEGatherKernel.h
index 0711f8190b..fc0e67854b 100644
--- a/src/core/NEON/kernels/NEGatherKernel.h
+++ b/src/core/NEON/kernels/NEGatherKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -85,18 +85,23 @@ private:
      *
      * For gather on the 0 axis an element by element copy is performed.
      *
-     * @param[in] window Region on which to execute the kernel. (Must be a region of the window returned by window())
-     * @param[in] info   Info about executing thread and CPU.
+     * @param[in] window Region on which to run the kernel. (Must be a region of the window returned by window())
+     * @param[in] info   Info about running thread and CPU.
      */
     template <typename U>
     void gather_0_axis(const Window &window, const ThreadInfo &info);

+    template <typename U>
+    void gather_dims_0_axis(const Window &window, const ThreadInfo &info);
+
+    template <typename U>
+    void gather_dims_n_axis(const Window &window, const ThreadInfo &info);
     /** Implementation of the gather operation.
      *
      * For 1<=axis a row-wise copy is taking place.
      *
-     * @param[in] window Region on which to execute the kernel. (Must be a region of the window returned by window())
-     * @param[in] info   Info about executing thread and CPU.
+     * @param[in] window Region on which to run the kernel. (Must be a region of the window returned by window())
+     * @param[in] info   Info about running thread and CPU.
      */
     template <typename U>
     void gather_n_axis(const Window &window, const ThreadInfo &info);
diff --git a/tests/datasets/GatherDataset.h b/tests/datasets/GatherDataset.h
index 29a99d5239..f4ad7c9497 100644
--- a/tests/datasets/GatherDataset.h
+++ b/tests/datasets/GatherDataset.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -106,6 +106,18 @@ private:
     std::vector<int> _axis{};
 };

+
+class SmallGatherMultiDimIndicesDataset final : public GatherDataset
+{
+public:
+    SmallGatherMultiDimIndicesDataset()
+    {
+        add_config(TensorShape(15U, 15U), TensorShape(4U, 13U, 2U), 0);
+        add_config(TensorShape(15U, 15U), TensorShape(2U, 12U), 0);
+    }
+};
+
+
 class SmallGatherDataset final : public GatherDataset
 {
 public:
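
(Editorial illustration, not part of the patch.) The two configurations added above only exercise axis 0, which is the only axis the new gather_dims_* kernel path accepts (gather_dims_n_axis() raises "NOT_SUPPORTED!"). A small sketch of the output shapes these entries should produce; it assumes shapes can be compared with the library's Dimensions operator==:

    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/utils/misc/ShapeCalculator.h"
    #include <cassert>

    void check_multi_dim_indices_dataset_shapes()
    {
        using arm_compute::TensorShape;
        using arm_compute::misc::shape_calculator::compute_gather_shape;
        // 3D indices: input (15, 15), indices (4, 13, 2), axis 0 -> output (15, 4, 13, 2)
        assert(compute_gather_shape(TensorShape(15U, 15U), TensorShape(4U, 13U, 2U), 0) == TensorShape(15U, 4U, 13U, 2U));
        // 2D indices: input (15, 15), indices (2, 12), axis 0 -> output (15, 2, 12)
        assert(compute_gather_shape(TensorShape(15U, 15U), TensorShape(2U, 12U), 0) == TensorShape(15U, 2U, 12U));
    }
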
diff --git a/tests/validation/NEON/Gather.cpp b/tests/validation/NEON/Gather.cpp
index ca1e166bd1..1ab2668aad 100644
--- a/tests/validation/NEON/Gather.cpp
+++ b/tests/validation/NEON/Gather.cpp
@@ -100,12 +100,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
 template <typename T>
 using NEGatherFixture = GatherFixture<Tensor, Accessor, NEGather, T>;

+const auto gather_small_shapes = arm_compute::test::framework::dataset::concat(datasets::SmallGatherDataset(),datasets::SmallGatherMultiDimIndicesDataset());
+
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEGatherFixture<half>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::F16)))
+                       combine( gather_small_shapes, framework::dataset::make("DataType", DataType::F16)))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -125,7 +127,7 @@ TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEGatherFixture<float>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::F32)))
+                       combine(gather_small_shapes, framework::dataset::make("DataType", DataType::F32)))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -146,7 +148,7 @@ TEST_SUITE(U8)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEGatherFixture<uint8_t>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::U8)))
+                       combine(gather_small_shapes, framework::dataset::make("DataType", DataType::U8)))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -166,7 +168,7 @@ TEST_SUITE(U16)
 FIXTURE_DATA_TEST_CASE(RunSmall,
                        NEGatherFixture<uint16_t>,
                        framework::DatasetMode::PRECOMMIT,
-                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::U16)))
+                       combine(gather_small_shapes, framework::dataset::make("DataType", DataType::U16)))
 {
     // Validate output
     validate(Accessor(_target), _reference);
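
(Editorial illustration, not part of the patch.) The precommit suites above reach the new code path through GatherFixture. For reference, this is roughly how the same case looks through the public NEGather runtime function; it assumes the existing NEGather::configure(input, indices, output, axis) interface, which this patch does not change, and omits filling the tensors:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGather.h"
    #include "arm_compute/runtime/Tensor.h"

    // Gather rows of a (15, 15) F32 tensor with a (2, 12) U32 index matrix on axis 0.
    void run_gather_with_2d_indices()
    {
        using namespace arm_compute;
        Tensor input, indices, output;
        input.allocator()->init(TensorInfo(TensorShape(15U, 15U), 1, DataType::F32));
        indices.allocator()->init(TensorInfo(TensorShape(2U, 12U), 1, DataType::U32));

        NEGather gather;
        gather.configure(&input, &indices, &output, 0); // output shape (15, 2, 12) is auto-initialised

        input.allocator()->allocate();
        indices.allocator()->allocate();
        output.allocator()->allocate();
        // ... fill input and indices (index values must be smaller than 15), then:
        gather.run();
    }
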
diff --git a/tests/validation/reference/Gather.cpp b/tests/validation/reference/Gather.cpp
index 93ac09cf95..02292fb74e 100644
--- a/tests/validation/reference/Gather.cpp
+++ b/tests/validation/reference/Gather.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 Arm Limited.
+ * Copyright (c) 2018-2019, 2022 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,22 +45,39 @@ SimpleTensor<T> gather(const SimpleTensor<T> &src, const SimpleTensor<uint32_t>
     Window win;
     win.use_tensor_dimensions(dst_shape);

-    execute_window_loop(win, [&](const Coordinates & id)
+    if(indices.shape().num_dimensions() == 1u)
     {
-        Coordinates offset;
-        for(unsigned int dim = 0; dim < id.num_dimensions(); ++dim)
+        execute_window_loop(win, [&](const Coordinates & id)
         {
-            if(dim == actual_axis)
+            Coordinates offset;
+            for(unsigned int dim = 0; dim < id.num_dimensions(); ++dim)
             {
-                offset.set(dim, indices_ptr[id[dim]]);
+                if(dim == actual_axis)
+                {
+                    offset.set(dim, indices_ptr[id[dim]]);
+                }
+                else
+                {
+                    offset.set(dim, id[dim]);
+                }
             }
-            else
+            *reinterpret_cast<T *>(dst(id)) = *reinterpret_cast<const T *>(src(offset));
+        });
+    }
+    else
+    {
+        if(actual_axis == 0)
+        {
+            win.set(Window::DimX, Window::Dimension(0, 1, 1));
+            uint32_t index = 0;
+            execute_window_loop(win, [&](const Coordinates & id)
             {
-                offset.set(dim, id[dim]);
-            }
+                auto *dst_ptr = reinterpret_cast<T *>(dst(id));
+                const int row_to_copy = indices[index++];
+                std::copy_n(src.data() + row_to_copy * src.shape()[0], src.shape()[0], dst_ptr);
+            });
         }
-        *reinterpret_cast<T *>(dst(id)) = *reinterpret_cast<const T *>(src(offset));
-    });
+    }

     return dst;
 }
@@ -72,4 +89,4 @@ template SimpleTensor gather(const SimpleTensor &src, const Si
 } // namespace reference
 } // namespace validation
 } // namespace test
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
--
cgit v1.2.1