From 2dce6cc676dbedf90101202c0dd678c24b1e3e43 Mon Sep 17 00:00:00 2001 From: John Kesapides Date: Mon, 14 Jan 2019 09:47:09 +0000 Subject: COMPMID-1763 : NEON: Implement Gather Change-Id: I9a3808315290bd395f5acce4530ab8daccddf8be Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/167195 Tested-by: bsgcomp Reviewed-by: Georgios Pinitas Reviewed-on: https://review.mlplatform.org/520 Tested-by: Arm Jenkins Reviewed-by: Pablo Marquez --- arm_compute/core/NEON/NEKernels.h | 1 + arm_compute/core/NEON/kernels/NEGatherKernel.h | 112 +++++++++++++ arm_compute/runtime/NEON/NEFunctions.h | 1 + arm_compute/runtime/NEON/functions/NEGather.h | 62 ++++++++ src/core/NEON/kernels/NEGatherKernel.cpp | 203 +++++++++++++++++++++++ src/runtime/NEON/functions/NEGather.cpp | 45 ++++++ tests/validation/NEON/Gather.cpp | 212 +++++++++++++++++++++++++ 7 files changed, 636 insertions(+) create mode 100644 arm_compute/core/NEON/kernels/NEGatherKernel.h create mode 100644 arm_compute/runtime/NEON/functions/NEGather.h create mode 100644 src/core/NEON/kernels/NEGatherKernel.cpp create mode 100644 src/runtime/NEON/functions/NEGather.cpp create mode 100644 tests/validation/NEON/Gather.cpp diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h index b7810cbd51..cc6af39a54 100644 --- a/arm_compute/core/NEON/NEKernels.h +++ b/arm_compute/core/NEON/NEKernels.h @@ -79,6 +79,7 @@ #include "arm_compute/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMMatrixVectorMultiplyKernel.h" #include "arm_compute/core/NEON/kernels/NEGEMMTranspose1xWKernel.h" +#include "arm_compute/core/NEON/kernels/NEGatherKernel.h" #include "arm_compute/core/NEON/kernels/NEGaussian3x3Kernel.h" #include "arm_compute/core/NEON/kernels/NEGaussian5x5Kernel.h" #include "arm_compute/core/NEON/kernels/NEGaussianPyramidKernel.h" diff --git a/arm_compute/core/NEON/kernels/NEGatherKernel.h b/arm_compute/core/NEON/kernels/NEGatherKernel.h new file mode 100644 index 0000000000..667e9812d8 --- /dev/null +++ b/arm_compute/core/NEON/kernels/NEGatherKernel.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef __ARM_COMPUTE_NEGATHERKERNEL_H__
+#define __ARM_COMPUTE_NEGATHERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Kernel to perform gather operation on NEON */
+class NEGatherKernel : public INEKernel
+{
+public:
+    /** Default constructor. */
+    NEGatherKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers). */
+    NEGatherKernel(const NEGatherKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers). */
+    NEGatherKernel &operator=(const NEGatherKernel &) = delete;
+    /** Allow instances of this class to be moved. */
+    NEGatherKernel(NEGatherKernel &&) = default;
+    /** Allow instances of this class to be moved. */
+    NEGatherKernel &operator=(NEGatherKernel &&) = default;
+    /** Default destructor. */
+    ~NEGatherKernel() = default;
+
+    /** Name of the kernel
+     *
+     * @return Kernel name
+     */
+    const char *name() const override
+    {
+        return "NEGatherKernel";
+    }
+    /** Initialise the kernel's inputs and outputs
+     *
+     * @param[in]  input   Source tensor. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+     * @param[in]  indices Indices tensor. Supported tensor rank: up to 1. Must be one of the following types: U32/S32. Each value must be in range [0, input.shape[@p axis])
+     * @param[out] output  Destination tensor. Data type supported: Same as @p input
+     * @param[in]  axis    (Optional) The axis in @p input to gather @p indices from. Negative values wrap around. Defaults to 0
+     */
+    void configure(const ITensor *input, const ITensor *indices, ITensor *output, int axis = 0);
+    /** Static function to check if given info will lead to a valid configuration of @ref NEGatherKernel
+     *
+     * @param[in] input   Source tensor info. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+     * @param[in] indices Indices tensor info. Supported tensor rank: up to 1. Must be one of the following types: U32/S32. Each value must be in range [0, input.shape[@p axis])
+     * @param[in] output  Destination tensor info. Data type supported: Same as @p input
+     * @param[in] axis    (Optional) The axis in @p input to gather @p indices from. Negative values wrap around. Defaults to 0
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+    /** Implementation of the gather operation for 0 axis.
+     *
+     * For gather on the 0 axis an element by element copy is performed.
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a region of the window returned by window())
+     * @param[in] info   Info about executing thread and CPU.
+     */
+    template <typename U>
+    void gather_0_axis(const Window &window, const ThreadInfo &info);
+
+    /** Implementation of the gather operation.
+     *
+     * For axis >= 1, a row-wise copy is performed.
+     *
+     * @param[in] window Region on which to execute the kernel. (Must be a region of the window returned by window())
+     * @param[in] info   Info about executing thread and CPU.
+     */
+    template <typename U>
+    void gather_n_axis(const Window &window, const ThreadInfo &info);
+
+    using kernel_ptr = void (NEGatherKernel::*)(const Window &window, const ThreadInfo &info);
+
+    const ITensor *_input;
+    const ITensor *_indices;
+    int            _axis;
+    ITensor       *_output;
+    kernel_ptr     _func;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEGATHERKERNEL_H__ */
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 606f282e16..cc13ff5d5e 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -75,6 +75,7 @@
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMLowpOutputStage.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h"
+#include "arm_compute/runtime/NEON/functions/NEGather.h"
 #include "arm_compute/runtime/NEON/functions/NEGaussian3x3.h"
 #include "arm_compute/runtime/NEON/functions/NEGaussian5x5.h"
 #include "arm_compute/runtime/NEON/functions/NEGaussianPyramid.h"
diff --git a/arm_compute/runtime/NEON/functions/NEGather.h b/arm_compute/runtime/NEON/functions/NEGather.h
new file mode 100644
index 0000000000..f6de961d2a
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NEGather.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __ARM_COMPUTE_NEGATHER_H__
+#define __ARM_COMPUTE_NEGATHER_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to run @ref NEGatherKernel */
+class NEGather : public INESimpleFunctionNoBorder
+{
+public:
+    /** Initialise the kernel's inputs and outputs
+     *
+     * @param[in]  input   Source tensor. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+     * @param[in]  indices Indices tensor. Supported tensor rank: up to 1. Must be one of the following types: U32/S32. Each value must be in range [0, input.shape[@p axis])
+     * @param[out] output  Destination tensor. Data type supported: Same as @p input
+     * @param[in]  axis    (Optional) The axis in @p input to gather @p indices from. Defaults to 0
+     */
+    void configure(const ITensor *input, const ITensor *indices, ITensor *output, int axis = 0);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref NEGatherKernel
+     *
+     * @param[in] input   Source tensor info. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
+     * @param[in] indices Indices tensor info. Supported tensor rank: up to 1. Must be one of the following types: U32/S32. Each value must be in range [0, input.shape[@p axis])
+     * @param[in] output  Destination tensor info. Data type supported: Same as @p input
+     * @param[in] axis    (Optional) The axis in @p input to gather @p indices from. Defaults to 0
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis);
+};
+
+} // namespace arm_compute
+
+#endif /* __ARM_COMPUTE_NEGATHER_H__ */
diff --git a/src/core/NEON/kernels/NEGatherKernel.cpp b/src/core/NEON/kernels/NEGatherKernel.cpp
new file mode 100644
index 0000000000..a05059d3b4
--- /dev/null
+++ b/src/core/NEON/kernels/NEGatherKernel.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEGatherKernel.h"
+
+#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+namespace arm_compute
+{
+namespace
+{
+/** Validate the indices
+ *
+ * Validate that indices are not negative
+ *
+ * @param[in] indices Indices tensor info.
+ */
+template <typename U>
+void validate_indices(const ITensor *indices)
+{
+    for(size_t i = 0; i < indices->info()->tensor_shape()[0]; ++i)
+    {
+        ARM_COMPUTE_ERROR_ON(*(reinterpret_cast<U *>(indices->ptr_to_element(Coordinates(i)))) < 0);
+    }
+}
+
+} // namespace
+
+NEGatherKernel::NEGatherKernel()
+    : _input{}, _indices{}, _axis{}, _output{}, _func{}
+{
+}
+
+template <typename U>
+inline void NEGatherKernel::gather_0_axis(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+
+    // Validate that the indices are not negative
+    validate_indices<U>(_indices);
+
+    Iterator output_it(_output, window);
+    execute_window_loop(window, [&](const Coordinates & id)
+    {
+        Coordinates gather_id(id);
+
+        auto new_index = *(reinterpret_cast<U *>(_indices->ptr_to_element(Coordinates(id[0]))));
+        gather_id.set(0, new_index);
+
+        std::copy_n(_input->ptr_to_element(gather_id), _output->info()->element_size(), output_it.ptr());
+    },
+    output_it);
+}
+
+template <typename U>
+void NEGatherKernel::gather_n_axis(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+
+    // Validate that the indices are not negative
+    validate_indices<U>(_indices);
+
+    Window output_window{ window };
+    output_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator output_it(_output, output_window);
+    execute_window_loop(output_window, [&](const Coordinates & id)
+    {
+        Coordinates gather_id(id);
+
+        auto new_index = *(reinterpret_cast<U *>(_indices->ptr_to_element(Coordinates(id[_axis]))));
+        gather_id.set(_axis, new_index);
+
+        std::copy_n(_input->ptr_to_element(gather_id), _input->info()->dimension(0) * _output->info()->element_size(), output_it.ptr());
+    },
+    output_it);
+}
+
+void NEGatherKernel::configure(const ITensor *input, const ITensor *indices, ITensor *output, int axis)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices);
+    ARM_COMPUTE_ERROR_ON(indices->info()->num_dimensions() != 1);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32, DataType::S32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, DataType::U16, DataType::S16, DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+
+    _input   = input;
+    _indices = indices;
+    _output  = output;
+    _axis    = axis;
+
+    if(_axis < 0)
+    {
+        _axis += input->info()->num_dimensions();
+    }
+    ARM_COMPUTE_ERROR_ON(0 > _axis || _axis >= static_cast<int32_t>(input->info()->num_dimensions()));
+
+    if(0 == _axis)
+    {
+        switch(_indices->info()->data_type())
+        {
+            case DataType::U32:
+                _func = &NEGatherKernel::gather_0_axis<uint32_t>;
+                break;
+            case DataType::S32:
+                _func = &NEGatherKernel::gather_0_axis<int32_t>;
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Not supported");
+                break;
+        }
+    }
+    else
+    {
+        switch(_indices->info()->data_type())
+        {
+            case DataType::U32:
+                _func = &NEGatherKernel::gather_n_axis<uint32_t>;
+                break;
+            case DataType::S32:
+                _func = &NEGatherKernel::gather_n_axis<int32_t>;
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Not supported");
+                break;
+        }
+    }
+    // Output auto initialization if not yet initialized
+    TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->info()->tensor_shape(), indices->info()->tensor_shape(), _axis);
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+
+    // Create window
+    Window win = calculate_max_window(*output->info(), Steps());
+    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
+
+    INEKernel::configure(win);
+}
+
+Status NEGatherKernel::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, indices, output);
+    ARM_COMPUTE_RETURN_ERROR_ON(indices->num_dimensions() > 1);
+    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+
+    if(axis < 0)
+    {
+        axis += input->num_dimensions();
+    }
+
+    ARM_COMPUTE_RETURN_ERROR_ON(0 > axis || axis >= static_cast<int32_t>(input->num_dimensions()));
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8,
+                                                         DataType::U16, DataType::S16,
+                                                         DataType::U32, DataType::S32, DataType::F16, DataType::F32);
+
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
+        TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->tensor_shape(), indices->tensor_shape(), axis);
+        ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size());
+    }
+
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32, DataType::S32);
+
+    return Status{};
+}
+
+void NEGatherKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON(_func == nullptr);
+
+    (this->*_func)(window, info);
+}
+
+} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEGather.cpp b/src/runtime/NEON/functions/NEGather.cpp
new file mode 100644
index 0000000000..078bd5ab26
--- /dev/null
+++ b/src/runtime/NEON/functions/NEGather.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEGather.h"
+
+#include "arm_compute/core/NEON/kernels/NEGatherKernel.h"
+#include "support/ToolchainSupport.h"
+
+#include <utility>
+
+namespace arm_compute
+{
+void NEGather::configure(const ITensor *input, const ITensor *indices, ITensor *output, int axis)
+{
+    auto k = arm_compute::support::cpp14::make_unique<NEGatherKernel>();
+    k->configure(input, indices, output, axis);
+    _kernel = std::move(k);
+}
+
+Status NEGather::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis)
+{
+    return NEGatherKernel::validate(input, indices, output, axis);
+}
+
+} // namespace arm_compute
diff --git a/tests/validation/NEON/Gather.cpp b/tests/validation/NEON/Gather.cpp
new file mode 100644
index 0000000000..2e6a3d4ed2
--- /dev/null
+++ b/tests/validation/NEON/Gather.cpp
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEGather.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/GatherDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/GatherFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(Gather)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 27U), 1, DataType::F16),
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F32),           // Invalid Indices data type
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F32),           // Invalid Indices dimensionality
+                                                TensorInfo(TensorShape(5U, 5U, 5U, 5U, 5U), 1, DataType::F32), // Invalid Input dimensionality
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F16),           // Mismatching data type input/output
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F32),           // Invalid positive axis value
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F16),           // Invalid negative axis value
+        }),
+        framework::dataset::make("IndicesInfo", {
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U8),
+                                                TensorInfo(TensorShape(10U, 10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+                                                TensorInfo(TensorShape(10U), 1, DataType::U32),
+        })),
+        framework::dataset::make("OutputInfo", {
+                                                TensorInfo(TensorShape(10U, 27U), 1, DataType::F16),
+                                                TensorInfo(TensorShape(27U, 10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(10U, 27U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(10U, 27U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(27U, 10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(10U, 5U, 5U, 5U, 5U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(27U, 10U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(27U, 27U), 1, DataType::F16),
+        })),
+        framework::dataset::make("Axis", {
+                                                0,
+                                                1,
+                                                -2,
+                                                0,
+                                                1,
+                                                0,
+                                                1,
+                                                2,
+                                                -3,
+        })),
+        framework::dataset::make("Expected", { true, true, true, false, false, false, false, false, false })),
+        input_info, indices_info, output_info, axis, expected)
+{
+    const Status status = NEGather::validate(&input_info.clone()->set_is_resizable(true), &indices_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), axis);
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration,
+               framework::DatasetMode::ALL,
+               combine(arm_compute::test::datasets::SmallGatherDataset(), framework::dataset::make("DataType", { DataType::F16, DataType::F32 })),
+               input_shape, indices_shape, axis, data_type)
+{
+    const uint32_t actual_axis = wrap_around(axis, static_cast<int>(input_shape.num_dimensions()));
+    Tensor         src         = create_tensor<Tensor>(input_shape, data_type);
+    Tensor         indices     = create_tensor<Tensor>(indices_shape, DataType::U32);
+    TensorShape    dst_shape   = arm_compute::misc::shape_calculator::compute_gather_shape(input_shape, indices_shape, actual_axis);
+    Tensor         dst         = create_tensor<Tensor>(dst_shape, data_type);
+
+    // Create and Configure function
+    NEGather gather;
+    gather.configure(&src, &indices, &dst, axis);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(dst.info()->tensor_shape());
+    validate(dst.info()->valid_region(), valid_region);
+}
+
+template <typename T>
+using NEGatherFixture = GatherFixture<Tensor, Accessor, NEGather, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEGatherFixture<half>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEGatherFixture<half>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEGatherFixture<float>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEGatherFixture<float>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+
+TEST_SUITE(U8)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEGatherFixture<uint8_t>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::U8)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEGatherFixture<uint8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::U8)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // U8
+
+TEST_SUITE(U16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       NEGatherFixture<uint16_t>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::U16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       NEGatherFixture<uint16_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::U16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // U16
+
+TEST_SUITE_END() // Gather
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
--
cgit v1.2.1
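
For reference, a minimal usage sketch of the new NEGather function follows. It is not part of the patch; it assumes the standard arm_compute runtime headers, and the shapes, index values and axis choice are illustrative only.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NEGather.h"
    #include "arm_compute/runtime/Tensor.h"

    #include <cstdint>

    using namespace arm_compute;

    int main()
    {
        // Source: 3 x 4 F32 tensor; two U32 indices select rows along axis 1 (the y dimension).
        Tensor src{}, indices{}, dst{};
        src.allocator()->init(TensorInfo(TensorShape(3U, 4U), 1, DataType::F32));
        indices.allocator()->init(TensorInfo(TensorShape(2U), 1, DataType::U32));
        dst.allocator()->init(TensorInfo(TensorShape(3U, 2U), 1, DataType::F32)); // dimension 1 replaced by number of indices

        // Configure the function first (uses only tensor info), then allocate backing memory.
        NEGather gather;
        gather.configure(&src, &indices, &dst, 1);

        src.allocator()->allocate();
        indices.allocator()->allocate();
        dst.allocator()->allocate();

        // Pick rows 0 and 3 of the source (src itself would be filled by the application).
        auto *idx = reinterpret_cast<uint32_t *>(indices.buffer());
        idx[0] = 0;
        idx[1] = 3;

        gather.run();
        return 0;
    }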