From 8529bd68d8a979874b8c53c0e26af411aae058f5 Mon Sep 17 00:00:00 2001 From: Manuel Bottini Date: Wed, 21 Nov 2018 11:53:04 +0000 Subject: COMPMID-1727 - CL: Implement Gather Change-Id: I3d859da09a4de1019bb8c2046725eab942247927 Reviewed-on: https://review.mlplatform.org/386 Tested-by: Arm Jenkins Reviewed-by: Georgios Pinitas --- arm_compute/core/CL/CLKernels.h | 3 +- arm_compute/core/CL/kernels/CLGatherKernel.h | 80 ++++++++++ arm_compute/core/utils/misc/ShapeCalculator.h | 14 +- arm_compute/runtime/CL/CLFunctions.h | 3 +- arm_compute/runtime/CL/functions/CLGather.h | 59 ++++++++ src/core/CL/CLKernelLibrary.cpp | 7 +- src/core/CL/cl_kernels/gather.cl | 91 +++++++++++ src/core/CL/kernels/CLGatherKernel.cpp | 135 +++++++++++++++++ src/runtime/CL/functions/CLGather.cpp | 43 ++++++ tests/datasets/GatherDataset.h | 169 +++++++++++++++++++++ tests/validation/CL/Gather.cpp | 210 ++++++++++++++++++++++++++ tests/validation/fixtures/GatherFixture.h | 142 +++++++++++++++++ tests/validation/reference/Gather.cpp | 75 +++++++++ tests/validation/reference/Gather.h | 46 ++++++ 14 files changed, 1073 insertions(+), 4 deletions(-) create mode 100644 arm_compute/core/CL/kernels/CLGatherKernel.h create mode 100644 arm_compute/runtime/CL/functions/CLGather.h create mode 100644 src/core/CL/cl_kernels/gather.cl create mode 100644 src/core/CL/kernels/CLGatherKernel.cpp create mode 100644 src/runtime/CL/functions/CLGather.cpp create mode 100644 tests/datasets/GatherDataset.h create mode 100644 tests/validation/CL/Gather.cpp create mode 100644 tests/validation/fixtures/GatherFixture.h create mode 100644 tests/validation/reference/Gather.cpp create mode 100644 tests/validation/reference/Gather.h diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h index cfcfb7400b..e68769b6ae 100644 --- a/arm_compute/core/CL/CLKernels.h +++ b/arm_compute/core/CL/CLKernels.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -83,6 +83,7 @@ #include "arm_compute/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h" #include "arm_compute/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h" #include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h" +#include "arm_compute/core/CL/kernels/CLGatherKernel.h" #include "arm_compute/core/CL/kernels/CLGaussian3x3Kernel.h" #include "arm_compute/core/CL/kernels/CLGaussian5x5Kernel.h" #include "arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h" diff --git a/arm_compute/core/CL/kernels/CLGatherKernel.h b/arm_compute/core/CL/kernels/CLGatherKernel.h new file mode 100644 index 0000000000..4dac6b0d1f --- /dev/null +++ b/arm_compute/core/CL/kernels/CLGatherKernel.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_CLGATHERKERNEL_H__ +#define __ARM_COMPUTE_CLGATHERKERNEL_H__ + +#include "arm_compute/core/CL/ICLKernel.h" +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +class ICLTensor; + +/** Interface for the kernel to perform the gather operation */ +class CLGatherKernel : public ICLKernel +{ +public: + /** Default constructor */ + CLGatherKernel(); + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLGatherKernel(const CLGatherKernel &) = delete; + /** Prevent instances of this class from being copied (As this class contains pointers) */ + CLGatherKernel &operator=(const CLGatherKernel &) = delete; + /** Allow instances of this class to be moved */ + CLGatherKernel(CLGatherKernel &&) = default; + /** Allow instances of this class to be moved */ + CLGatherKernel &operator=(CLGatherKernel &&) = default; + /** Default destructor */ + ~CLGatherKernel() = default; + /** Initialise the kernel's inputs and outputs + * + * @param[in] input Source tensor. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 + * @param[in] indices Indices tensor. Supported tensor rank: up to 1. Must be one of the following types: U32. Each value must be in range [0, input.shape[@p axis]) + * @param[out] output Destination tensor. Data type supported: Same as @p input + * @param[in] axis (Optional) The axis in @p input to gather @p indices from. Negative values wrap around. Defaults to 0 + */ + void configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis = 0); + + /** Static function to check if given info will lead to a valid configuration of @ref CLGatherKernel + * + * @param[in] input Source tensor info. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 + * @param[in] indices Indices tensor info. Supported tensor rank: up to 1. Must be one of the following types: U32. Each value must be in range [0, input.shape[@p axis]) + * @param[in] output Destination tensor info. Data type supported: Same as @p input + * @param[in] axis (Optional) The axis in @p input to gather @p indices from. Negative values wrap around. Defaults to 0 + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis = 0); + + // Inherited methods overridden: + void run(const Window &window, cl::CommandQueue &queue) override; + +private: + const ICLTensor *_input; /**< Source tensor */ + const ICLTensor *_indices; /**< Indices tensor */ + ICLTensor *_output; /**< Destination tensor */ + int _axis; /**< Axis index */ +}; +} // namespace arm_compute +#endif /*__ARM_COMPUTE_CLGATHERKERNEL_H__ */ diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h index 4756ff4f97..ba0d8e254d 100644 --- a/arm_compute/core/utils/misc/ShapeCalculator.h +++ b/arm_compute/core/utils/misc/ShapeCalculator.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -874,6 +874,18 @@ inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, } return shape_out; } + +inline TensorShape compute_gather_shape(const TensorShape &input_shape, const TensorShape &indices_shape, uint32_t actual_axis) +{ + ARM_COMPUTE_ERROR_ON(indices_shape.num_dimensions() > 1); + ARM_COMPUTE_ERROR_ON(input_shape.num_dimensions() > 4); + ARM_COMPUTE_ERROR_ON(actual_axis >= input_shape.num_dimensions()); + + TensorShape output_shape = input_shape; + output_shape[actual_axis] = indices_shape[0]; + + return output_shape; +} } // namespace shape_calculator } // namespace misc } // namespace arm_compute diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h index e0cf65b923..686d266557 100644 --- a/arm_compute/runtime/CL/CLFunctions.h +++ b/arm_compute/runtime/CL/CLFunctions.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -76,6 +76,7 @@ #include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h" #include "arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h" #include "arm_compute/runtime/CL/functions/CLGEMMTranspose1xW.h" +#include "arm_compute/runtime/CL/functions/CLGather.h" #include "arm_compute/runtime/CL/functions/CLGaussian3x3.h" #include "arm_compute/runtime/CL/functions/CLGaussian5x5.h" #include "arm_compute/runtime/CL/functions/CLGaussianPyramid.h" diff --git a/arm_compute/runtime/CL/functions/CLGather.h b/arm_compute/runtime/CL/functions/CLGather.h new file mode 100644 index 0000000000..048804dfb2 --- /dev/null +++ b/arm_compute/runtime/CL/functions/CLGather.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __ARM_COMPUTE_CLGATHER_H__ +#define __ARM_COMPUTE_CLGATHER_H__ + +#include "arm_compute/runtime/CL/ICLSimpleFunction.h" + +namespace arm_compute +{ +class ICLTensor; + +/** Basic function to run @ref CLGatherKernel */ +class CLGather : public ICLSimpleFunction +{ +public: + /** Initialise the kernel's inputs and outputs + * + * @param[in] input Source tensor. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 + * @param[in] indices Indices tensor. Supported tensor rank: up to 1. Must be one of the following type: S64. 
Each value Must be in range [0, input.shape[@p axis]) + * @param[out] output Destination tensor. Data type supported: Same as @p input + * @param[in] axis (Optional) The axis in @p input to gather @p indices from. Defaults to 0 + */ + void configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis = 0); + + /** Static function to check if given info will lead to a valid configuration of @ref CLGatherKernel + * + * @param[in] input Source tensor info. Supported tensor rank: up to 4. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32 + * @param[in] indices Indices tensor info. Supported tensor rank: up to 4. Must be one of the following types: S64. Each value Must be in range [0, input.shape[@p axis]) + * @param[in] output Destination tensor info. Data type supported: Same as @p input + * @param[in] axis (Optional) The axis in @p input to gather @p indices from. Defaults to 0 + * + * @return a status + */ + static Status validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis = 0); +}; +} // namespace arm_compute +#endif /* __ARM_COMPUTE_CLGATHER_H__ */ diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index 470a50f76b..2bc2d06827 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2018 ARM Limited. + * Copyright (c) 2016-2019 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -262,6 +262,7 @@ const std::map CLKernelLibrary::_kernel_program_map = { "finalize", "optical_flow_pyramid_lk.cl" }, { "fuse_batchnormalization_layer", "batchnormalization_layer.cl" }, { "floor_layer", "floor.cl" }, + { "gather", "gather.cl" }, { "gaussian1x5_sub_x", "gaussian_pyramid.cl" }, { "gaussian5x1_sub_y", "gaussian_pyramid.cl" }, { "gemm_accumulate_biases", "gemm.cl" }, @@ -682,6 +683,10 @@ const std::map CLKernelLibrary::_program_source_map = { "floor.cl", #include "./cl_kernels/floor.clembed" + }, + { + "gather.cl", +#include "./cl_kernels/gather.clembed" }, { "gaussian_pyramid.cl", diff --git a/src/core/CL/cl_kernels/gather.cl b/src/core/CL/cl_kernels/gather.cl new file mode 100644 index 0000000000..34593ef60f --- /dev/null +++ b/src/core/CL/cl_kernels/gather.cl @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "helpers.h" + +#if defined(DATA_TYPE) && defined(AXIS) + +/** Performs the Gather operation along the chosen axis + * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short + * @note Axis should be given as a preprocessor argument using -DAXIS=axis. e.g. -DAXIS=1 + * @attention Output tensor depth should be given as a preprocessor argument using -DOUTPUT_DIM_Z=size. e.g. -DOUTPUT_DIM_Z=16 + * @attention Input tensor depth should be given as a preprocessor argument using -DINPUT_DIM_Z=size. e.g. -DINPUT_DIM_Z=16 + * + * + * @param[in] input_ptr Pointer to the source tensor. Supported data types: U8/S8/U16/S16/U32/S32/F16/F32 + * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] input_step_x input_stride_x * number of elements along X processed per work item (in bytes) + * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] input_step_y input_stride_y * number of elements along Y processed per work item (in bytes) + * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] input_step_z input_stride_z * number of elements along Z processed per work item (in bytes) + * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes) + * @param[in] input_step_w input_stride_w * number of elements along W processed per work item (in bytes) + * @param[in] input_offset_first_element_in_bytes Offset of the first element in the source tensor + * @param[in] indices_ptr Pointer to the indices vector. Supported data types: U32. + * @param[in] indices_stride_x Stride of the indices vector in X dimension (in bytes) + * @param[in] indices_step_x indices_stride_x * number of elements along X processed per work item (in bytes) + * @param[in] indices_offset_first_element_in_bytes Offset of the first element in the indices vector + * @param[out] output_ptr Pointer to the destination tensor.
Supported data types: same as @p input_ptr + * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] output_step_x output_stride_x * number of elements along X processed per work item (in bytes) + * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] output_step_y output_stride_y * number of elements along Y processed per work item (in bytes) + * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes) + * @param[in] output_step_z output_stride_z * number of elements along Z processed per work item (in bytes) + * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes) + * @param[in] output_step_w output_stride_w * number of elements along W processed per work item (in bytes) + * @param[in] output_offset_first_element_in_bytes Offset of the first element in the destination tensor + */ +__kernel void gather( + TENSOR4D_DECLARATION(input), + VECTOR_DECLARATION(indices), + TENSOR4D_DECLARATION(output)) +{ + const int px = get_global_id(0); + const int py = get_global_id(1); + const int pz = get_global_id(2) % OUTPUT_DIM_Z; + const int pw = get_global_id(2) / OUTPUT_DIM_Z; + + const Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, INPUT_DIM_Z); + const Vector indices = CONVERT_TO_VECTOR_STRUCT_NO_STEP(indices); + Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, OUTPUT_DIM_Z); + +#if AXIS == 0 + const uint index = *(__global const uint *)vector_offset(&indices, px); + __global const uchar *input_addr = tensor4D_offset(&input, index, py, pz, pw); +#elif AXIS == 1 + const uint index = *(__global const uint *)vector_offset(&indices, py); + __global const uchar *input_addr = tensor4D_offset(&input, px, index, pz, pw); +#elif AXIS == 2 + const uint index = *(__global const uint *)vector_offset(&indices, pz); + __global const uchar *input_addr = tensor4D_offset(&input, px, py, index, pw); +#elif AXIS == 3 + const uint index = *(__global const uint *)vector_offset(&indices, pw); + __global const uchar *input_addr = tensor4D_offset(&input, px, py, pz, index); +#endif //AXIS + + *(__global DATA_TYPE *)output.ptr = *((__global const DATA_TYPE *)input_addr); +} + +#endif //defined(DATA_TYPE) && defined(AXIS) \ No newline at end of file diff --git a/src/core/CL/kernels/CLGatherKernel.cpp b/src/core/CL/kernels/CLGatherKernel.cpp new file mode 100644 index 0000000000..006e755b30 --- /dev/null +++ b/src/core/CL/kernels/CLGatherKernel.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/core/CL/kernels/CLGatherKernel.h" + +#include "arm_compute/core/AccessWindowStatic.h" +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/CL/CLKernelLibrary.h" +#include "arm_compute/core/CL/CLValidate.h" +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/CL/OpenCL.h" +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/IAccessWindow.h" +#include "arm_compute/core/TensorInfo.h" +#include "arm_compute/core/Window.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" + +#include + +namespace arm_compute +{ +namespace +{ +inline Status validate_arguments(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis) +{ + const uint32_t actual_axis = wrap_around(axis, static_cast(input->num_dimensions())); + ARM_COMPUTE_RETURN_ERROR_ON(indices->num_dimensions() > 1); + ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4); + ARM_COMPUTE_RETURN_ERROR_ON(actual_axis >= input->num_dimensions()); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QASYMM8, + DataType::U16, DataType::S16, + DataType::U32, DataType::S32, DataType::F16, DataType::F32); + + if(output->total_size() != 0) + { + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output); + TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->tensor_shape(), indices->tensor_shape(), actual_axis); + ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size()); + } + + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::U32); + + return Status{}; +} + +std::pair validate_and_configure_window(ITensorInfo *input, ITensorInfo *indices, ITensorInfo *output, int axis) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices); + const uint32_t actual_axis = wrap_around(axis, static_cast(input->num_dimensions())); + // Output auto initialization if not yet initialized + TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input->tensor_shape(), indices->tensor_shape(), actual_axis); + auto_init_if_empty((*output), output_shape, 1, input->data_type()); + + // Create window + Window win = calculate_max_window(*output, Steps()); + output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape())); + + return std::make_pair(Status{}, win); +} + +} // namespace + +CLGatherKernel::CLGatherKernel() + : _input(nullptr), _indices(nullptr), _output(nullptr), _axis(0) +{ +} + +void CLGatherKernel::configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, indices); + ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), indices->info(), output->info(), axis)); + + // Configure kernel window + auto win_config = validate_and_configure_window(input->info(), indices->info(), output->info(), axis); + ARM_COMPUTE_ERROR_THROW_ON(win_config.first); + + _input = input; + _output = output; + _indices = indices; + _axis = wrap_around(axis, 
static_cast(input->info()->num_dimensions())); + + // Set build options + CLBuildOptions build_opts; + build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); + build_opts.add_option("-DOUTPUT_DIM_Z=" + support::cpp11::to_string(output->info()->dimension(2))); + build_opts.add_option("-DINPUT_DIM_Z=" + support::cpp11::to_string(input->info()->dimension(2))); + build_opts.add_option("-DAXIS=" + support::cpp11::to_string(_axis)); + + // Create kernel + _kernel = static_cast(CLKernelLibrary::get().create_kernel("gather", build_opts.options())); + ICLKernel::configure_internal(win_config.second); +} + +Status CLGatherKernel::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis) +{ + ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, indices, output, axis)); + ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), indices->clone().get(), output->clone().get(), axis).first); + return Status{}; +} + +void CLGatherKernel::run(const Window &window, cl::CommandQueue &queue) +{ + ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); + ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window); + + Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ); + unsigned int idx = 0; + add_4D_tensor_argument(idx, _input, window_collapsed); + add_1D_tensor_argument(idx, _indices, window_collapsed); + add_4D_tensor_argument(idx, _output, window_collapsed); + enqueue(queue, *this, window_collapsed); +} +} // namespace arm_compute diff --git a/src/runtime/CL/functions/CLGather.cpp b/src/runtime/CL/functions/CLGather.cpp new file mode 100644 index 0000000000..459438e702 --- /dev/null +++ b/src/runtime/CL/functions/CLGather.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/runtime/CL/functions/CLGather.h" + +#include "arm_compute/core/CL/ICLTensor.h" +#include "arm_compute/core/CL/kernels/CLGatherKernel.h" +#include "support/ToolchainSupport.h" + +namespace arm_compute +{ +void CLGather::configure(const ICLTensor *input, const ICLTensor *indices, ICLTensor *output, int axis) +{ + auto k = arm_compute::support::cpp14::make_unique(); + k->configure(input, indices, output, axis); + _kernel = std::move(k); +} + +Status CLGather::validate(const ITensorInfo *input, const ITensorInfo *indices, const ITensorInfo *output, int axis) +{ + return CLGatherKernel::validate(input, indices, output, axis); +} +} // namespace arm_compute diff --git a/tests/datasets/GatherDataset.h b/tests/datasets/GatherDataset.h new file mode 100644 index 0000000000..29f2cccf57 --- /dev/null +++ b/tests/datasets/GatherDataset.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef ARM_COMPUTE_TEST_GATHER_DATASET +#define ARM_COMPUTE_TEST_GATHER_DATASET + +#include "utils/TypePrinter.h" + +#include "arm_compute/core/Types.h" + +namespace arm_compute +{ +namespace test +{ +namespace datasets +{ +class GatherDataset +{ +public: + using type = std::tuple; + + struct iterator + { + iterator(std::vector::const_iterator input_shapes_it, + std::vector::const_iterator starts_values_it, + std::vector::const_iterator axis_it) + : _input_shapes_it{ std::move(input_shapes_it) }, + _indices_shapes_it{ std::move(starts_values_it) }, + _axis_it{ std::move(axis_it) } + { + } + + std::string description() const + { + std::stringstream description; + description << "InputShape=" << *_input_shapes_it << ":"; + description << "IndicesShape=" << *_indices_shapes_it << ":"; + description << "Axis=" << *_axis_it << ":"; + return description.str(); + } + + GatherDataset::type operator*() const + { + return std::make_tuple(*_input_shapes_it, *_indices_shapes_it, *_axis_it); + } + + iterator &operator++() + { + ++_input_shapes_it; + ++_indices_shapes_it; + ++_axis_it; + return *this; + } + + private: + std::vector::const_iterator _input_shapes_it; + std::vector::const_iterator _indices_shapes_it; + std::vector::const_iterator _axis_it; + }; + + iterator begin() const + { + return iterator(_input_shapes.begin(), _indices_shapes.begin(), _axis.begin()); + } + + int size() const + { + return std::min(_input_shapes.size(), std::min(_indices_shapes.size(), _axis.size())); + } + + void add_config(TensorShape input_shape, TensorShape indices_shape, int axis) + { + _input_shapes.emplace_back(std::move(input_shape)); + _indices_shapes.emplace_back(std::move(indices_shape)); + _axis.emplace_back(std::move(axis)); + } + +protected: + GatherDataset() = default; + GatherDataset(GatherDataset &&) = default; + +private: + std::vector _input_shapes{}; + std::vector _indices_shapes{}; + std::vector _axis{}; +}; + +class SmallGatherDataset final : public GatherDataset +{ +public: + SmallGatherDataset() + { + // 2D input + add_config(TensorShape(15U, 15U), TensorShape(5U), 0); + add_config(TensorShape(15U, 15U), TensorShape(5U), 1); + add_config(TensorShape(5U, 5U), TensorShape(80U), -1); + + // 3D input + add_config(TensorShape(5U, 5U, 5U), TensorShape(19U), 0); + add_config(TensorShape(5U, 4U, 6U), TensorShape(30U), 1); + add_config(TensorShape(3U, 5U, 7U), TensorShape(20U), 2); + add_config(TensorShape(5U, 4U, 6U), TensorShape(30U), -1); + add_config(TensorShape(3U, 5U, 7U), TensorShape(20U), -2); + + // 4D input + add_config(TensorShape(4U, 3U, 4U, 5U), TensorShape(4U), 0); + add_config(TensorShape(4U, 3U, 5U, 5U), TensorShape(5U), 1); + add_config(TensorShape(4U, 3U, 2U, 5U), TensorShape(6U), 2); + add_config(TensorShape(3U, 4U, 4U, 6U), TensorShape(7U), 3); + add_config(TensorShape(4U, 3U, 5U, 5U), TensorShape(5U), -1); + add_config(TensorShape(4U, 3U, 2U, 5U), TensorShape(6U), -2); + add_config(TensorShape(3U, 4U, 4U, 6U), TensorShape(7U), -3); + } +}; + +class LargeGatherDataset final : public GatherDataset +{ +public: + LargeGatherDataset() + { + // 2D input + add_config(TensorShape(150U, 150U), TensorShape(50U), 0); + add_config(TensorShape(150U, 150U), TensorShape(50U), 1); + add_config(TensorShape(150U, 150U), TensorShape(50U), -1); + + // 3D input + add_config(TensorShape(50U, 40U, 60U), TensorShape(33U), 0); + add_config(TensorShape(40U, 50U, 60U), TensorShape(24U), 1); + add_config(TensorShape(70U, 80U, 100U), TensorShape(50U), 2); + add_config(TensorShape(40U, 50U, 60U), 
TensorShape(24U), -1); + add_config(TensorShape(70U, 80U, 100U), TensorShape(50U), -2); + + // 4D input + add_config(TensorShape(30U, 40U, 20U, 20U), TensorShape(33U), 0); + add_config(TensorShape(23U, 10U, 60U, 20U), TensorShape(24U), 1); + add_config(TensorShape(14U, 20U, 10U, 31U), TensorShape(30U), 2); + add_config(TensorShape(34U, 10U, 40U, 20U), TensorShape(50U), 3); + add_config(TensorShape(23U, 10U, 60U, 20U), TensorShape(24U), -1); + add_config(TensorShape(14U, 20U, 10U, 31U), TensorShape(30U), -2); + add_config(TensorShape(34U, 10U, 40U, 20U), TensorShape(50U), -3); + } +}; + +} // namespace datasets +} // namespace test +} // namespace arm_compute + +#endif /* ARM_COMPUTE_TEST_GATHER_DATASET */ diff --git a/tests/validation/CL/Gather.cpp b/tests/validation/CL/Gather.cpp new file mode 100644 index 0000000000..cc892a30d1 --- /dev/null +++ b/tests/validation/CL/Gather.cpp @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/runtime/CL/CLTensor.h" +#include "arm_compute/runtime/CL/CLTensorAllocator.h" +#include "arm_compute/runtime/CL/functions/CLGather.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/datasets/GatherDataset.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/GatherFixture.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +TEST_SUITE(CL) +TEST_SUITE(Gather) + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 27U), 1, DataType::F16), + TensorInfo(TensorShape(27U, 27U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 27U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 27U), 1, DataType::F32), // Invalid Indices data type + TensorInfo(TensorShape(27U, 27U), 1, DataType::F32), // Invalid Indices dimensionality + TensorInfo(TensorShape(5U, 5U, 5U, 5U, 5U), 1, DataType::F32), // Invalid Input dimensionality + TensorInfo(TensorShape(27U, 27U), 1, DataType::F16), // Mismatching data type input/output + TensorInfo(TensorShape(27U, 27U), 1, DataType::F32), // Invalid positive axis value + TensorInfo(TensorShape(27U, 27U), 1, DataType::F16), // Invalid negative axis value + }), + framework::dataset::make("IndicesInfo", { + TensorInfo(TensorShape(10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U8), + TensorInfo(TensorShape(10U, 10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U32), + TensorInfo(TensorShape(10U), 1, DataType::U32), + })), + framework::dataset::make("OutputInfo", { + TensorInfo(TensorShape(10U, 27U), 1, DataType::F16), + TensorInfo(TensorShape(27U, 10U), 1, DataType::F32), + TensorInfo(TensorShape(10U, 27U), 1, DataType::F32), + TensorInfo(TensorShape(10U, 27U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 10U), 1, DataType::F32), + TensorInfo(TensorShape(10U, 5U, 5U, 5U, 5U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 10U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 27U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 27U), 1, DataType::F16), + })), + framework::dataset::make("Axis", { + 0, + 1, + -2, + 0, + 1, + 0, + 1, + 2, + -3, + })), + framework::dataset::make("Expected", { true, true, true, false, false, false, false, false, false })), + input_info, indices_info, output_info, axis, expected) +{ + const Status status = CLGather::validate(&input_info.clone()->set_is_resizable(true), &indices_info.clone()->set_is_resizable(true), &output_info.clone()->set_is_resizable(true), axis); + ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + +DATA_TEST_CASE(Configuration, + framework::DatasetMode::ALL, + combine(arm_compute::test::datasets::SmallGatherDataset(), framework::dataset::make("DataType", { DataType::F16, DataType::F32 })), + input_shape, indices_shape, axis, data_type) +{ + const uint32_t actual_axis = wrap_around(axis, static_cast(input_shape.num_dimensions())); + CLTensor src = create_tensor(input_shape, data_type); + CLTensor indices = create_tensor(indices_shape, DataType::U32); + 
TensorShape dst_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input_shape, indices_shape, actual_axis); + CLTensor dst = create_tensor(dst_shape, data_type); + + // Create and Configure function + CLGather gather; + gather.configure(&src, &indices, &dst, axis); + + // Validate valid region + const ValidRegion valid_region = shape_to_valid_region(dst.info()->tensor_shape()); + validate(dst.info()->valid_region(), valid_region); +} + +template +using CLGatherFixture = GatherFixture; + +TEST_SUITE(Float) +TEST_SUITE(FP16) +FIXTURE_DATA_TEST_CASE(RunSmall, + CLGatherFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::F16))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, + CLGatherFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::F16))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // FP16 + +TEST_SUITE(FP32) +FIXTURE_DATA_TEST_CASE(RunSmall, + CLGatherFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::F32))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, + CLGatherFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::F32))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // FP32 +TEST_SUITE_END() // Float + +TEST_SUITE(U8) +FIXTURE_DATA_TEST_CASE(RunSmall, + CLGatherFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::U8))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, + CLGatherFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::U8))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // U8 + +TEST_SUITE(U16) +FIXTURE_DATA_TEST_CASE(RunSmall, + CLGatherFixture, + framework::DatasetMode::PRECOMMIT, + combine(datasets::SmallGatherDataset(), framework::dataset::make("DataType", DataType::U16))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} + +FIXTURE_DATA_TEST_CASE(RunLarge, + CLGatherFixture, + framework::DatasetMode::NIGHTLY, + combine(datasets::LargeGatherDataset(), framework::dataset::make("DataType", DataType::U16))) +{ + // Validate output + validate(CLAccessor(_target), _reference); +} +TEST_SUITE_END() // U16 + +TEST_SUITE_END() // Gather +TEST_SUITE_END() // CL +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/fixtures/GatherFixture.h b/tests/validation/fixtures/GatherFixture.h new file mode 100644 index 0000000000..f2dcd4a1a4 --- /dev/null +++ b/tests/validation/fixtures/GatherFixture.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef ARM_COMPUTE_TEST_GATHER_FIXTURE +#define ARM_COMPUTE_TEST_GATHER_FIXTURE + +#include "arm_compute/core/TensorShape.h" +#include "arm_compute/core/Types.h" + +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "tests/AssetsLibrary.h" +#include "tests/Globals.h" +#include "tests/IAccessor.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/validation/Helpers.h" +#include "tests/validation/reference/Gather.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template +class GatherFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape input_shape, TensorShape indices_shape, int axis, DataType data_type) + { + _target = compute_target(input_shape, data_type, axis, indices_shape); + _reference = compute_reference(input_shape, data_type, axis, indices_shape); + } + +protected: + template + void fill(U &&tensor) + { + library->fill_tensor_uniform(tensor, 0); + } + + template + void generate_indices(U &&indices, const TensorShape &input_shape, uint32_t actual_axis, TensorShape indices_shape) + { + std::mt19937 gen(library->seed()); + uint32_t *indices_ptr = static_cast(indices.data()); + + std::uniform_int_distribution dist_index(0, input_shape[actual_axis] - 1); + //Let's consider 1D indices + for(unsigned int ind = 0; ind < indices_shape[0]; ind++) + { + indices_ptr[ind] = dist_index(gen); + } + } + + TensorType compute_target(const TensorShape &input_shape, + DataType data_type, + int axis, + const TensorShape indices_shape) + { + // Create tensors + TensorType src = create_tensor(input_shape, data_type); + TensorType indices_tensor = create_tensor(indices_shape, DataType::U32); + const uint32_t actual_axis = wrap_around(axis, static_cast(input_shape.num_dimensions())); + TensorShape output_shape = arm_compute::misc::shape_calculator::compute_gather_shape(input_shape, indices_shape, actual_axis); + TensorType dst = create_tensor(output_shape, data_type); + + // Create and configure function + FunctionType gather; + gather.configure(&src, &indices_tensor, &dst, axis); + + ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(indices_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Allocate tensors + 
src.allocator()->allocate(); + indices_tensor.allocator()->allocate(); + dst.allocator()->allocate(); + + ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!indices_tensor.info()->is_resizable(), framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); + + // Fill tensors + fill(AccessorType(src)); + generate_indices(AccessorType(indices_tensor), input_shape, actual_axis, indices_shape); + + // Compute function + gather.run(); + + return dst; + } + + SimpleTensor compute_reference(const TensorShape &input_shape, + DataType data_type, + int axis, + const TensorShape indices_shape) + { + // Create reference tensor + SimpleTensor src{ input_shape, data_type }; + SimpleTensor indices_tensor{ indices_shape, DataType::U32 }; + const uint32_t actual_axis = wrap_around(axis, static_cast(input_shape.num_dimensions())); + + // Fill reference tensor + fill(src); + generate_indices(indices_tensor, input_shape, actual_axis, indices_shape); + + return reference::gather(src, indices_tensor, actual_axis); + } + + TensorType _target{}; + SimpleTensor _reference{}; +}; + +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif /* ARM_COMPUTE_TEST_GATHER_FIXTURE */ diff --git a/tests/validation/reference/Gather.cpp b/tests/validation/reference/Gather.cpp new file mode 100644 index 0000000000..ab5ea2f92e --- /dev/null +++ b/tests/validation/reference/Gather.cpp @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "Gather.h" + +#include "arm_compute/core/Types.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "tests/validation/Helpers.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace reference +{ +template <typename T> +SimpleTensor<T> gather(const SimpleTensor<T> &src, const SimpleTensor<uint32_t> &indices, uint32_t actual_axis) +{ + const auto *indices_ptr = static_cast<const uint32_t *>(indices.data()); + const TensorShape dst_shape = arm_compute::misc::shape_calculator::compute_gather_shape(src.shape(), indices.shape(), actual_axis); + SimpleTensor<T> dst(dst_shape, src.data_type()); + + Window win; + win.use_tensor_dimensions(dst_shape); + execute_window_loop(win, [&](const Coordinates & id) + { + Coordinates offset; + for(unsigned int dim = 0; dim < id.num_dimensions(); ++dim) + { + if(dim == actual_axis) + { + offset.set(dim, indices_ptr[id[dim]]); + } + else + { + offset.set(dim, id[dim]); + } + } + *reinterpret_cast<T *>(dst(id)) = *reinterpret_cast<const T *>(src(offset)); + }); + + return dst; +} + +template SimpleTensor<float> gather(const SimpleTensor<float> &src, const SimpleTensor<uint32_t> &indices, uint32_t actual_axis); +template SimpleTensor<half> gather(const SimpleTensor<half> &src, const SimpleTensor<uint32_t> &indices, uint32_t actual_axis); +template SimpleTensor<uint16_t> gather(const SimpleTensor<uint16_t> &src, const SimpleTensor<uint32_t> &indices, uint32_t actual_axis); +template SimpleTensor<uint8_t> gather(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint32_t> &indices, uint32_t actual_axis); +} // namespace reference +} // namespace validation +} // namespace test +} // namespace arm_compute \ No newline at end of file diff --git a/tests/validation/reference/Gather.h b/tests/validation/reference/Gather.h new file mode 100644 index 0000000000..54e1cb8696 --- /dev/null +++ b/tests/validation/reference/Gather.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2019 ARM Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __ARM_COMPUTE_TEST_GATHER_H__ +#define __ARM_COMPUTE_TEST_GATHER_H__ + +#include "arm_compute/core/Types.h" +#include "tests/SimpleTensor.h" +#include "tests/validation/Helpers.h" + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +namespace reference +{ +template <typename T> +SimpleTensor<T> gather(const SimpleTensor<T> &src, const SimpleTensor<uint32_t> &indices, uint32_t actual_axis); +} // namespace reference +} // namespace validation +} // namespace test +} // namespace arm_compute + +#endif /* __ARM_COMPUTE_TEST_GATHER_H__ */ -- cgit v1.2.1
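
For reference, a minimal sketch of how the new CLGather function is expected to be driven from application code, based on the configure()/validate() signatures introduced in this patch. The CLScheduler/CLTensor boilerplate follows the library's usual runtime setup and is illustrative only; the chosen shapes, the axis value and the omitted fill step are assumptions, not part of the patch.

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGather.h"

using namespace arm_compute;

int main()
{
    // One-off OpenCL context/queue setup, as in the library's examples (assumed boilerplate).
    CLScheduler::get().default_init();

    // Gather 5 slices along axis 1 of a 15x15 F32 tensor using U32 indices,
    // mirroring one of the SmallGatherDataset configurations; the output shape
    // follows compute_gather_shape(): output[axis] = indices.shape[0].
    CLTensor input;
    CLTensor indices;
    CLTensor output;
    input.allocator()->init(TensorInfo(TensorShape(15U, 15U), 1, DataType::F32));
    indices.allocator()->init(TensorInfo(TensorShape(5U), 1, DataType::U32));
    output.allocator()->init(TensorInfo(TensorShape(15U, 5U), 1, DataType::F32));

    // The static validate() mirrors the checks performed at configure time.
    const Status status = CLGather::validate(input.info(), indices.info(), output.info(), 1);
    ARM_COMPUTE_ERROR_THROW_ON(status);

    CLGather gather;
    gather.configure(&input, &indices, &output, 1 /* axis */);

    input.allocator()->allocate();
    indices.allocator()->allocate();
    output.allocator()->allocate();

    // ... map and fill input and indices here; every index must lie in [0, input.shape[axis]) ...

    gather.run();
    CLScheduler::get().sync();
    return 0;
}

As with the other ICLSimpleFunction wrappers, CLGather only creates and owns a CLGatherKernel; the actual work is done by the gather.cl kernel, which is specialised at build time through the -DDATA_TYPE and -DAXIS options set in CLGatherKernel::configure().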