path: root/src/core/CL
authorMichalis Spyrou <michalis.spyrou@arm.com>2017-08-08 17:42:38 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:35:24 +0000
commit04f089cbcb4407e8d2883525edb661ba15ea922d (patch)
tree09ad467ee43cf8ca2feacfb7c9849b1888c15c1d /src/core/CL
parent39b4e4dba05988240ff06c87d3c4cf4d7f63bcf2 (diff)
downloadComputeLibrary-04f089cbcb4407e8d2883525edb661ba15ea922d.tar.gz
COMPMID-476 L2 Normalization for CL
Change-Id: I88f87173645880eb823916c5d4ac884c372a4fb4
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/83269
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'src/core/CL')
-rw-r--r--src/core/CL/CLKernelLibrary.cpp10
-rw-r--r--src/core/CL/cl_kernels/l2_normalize.cl61
-rw-r--r--src/core/CL/cl_kernels/reduction_operation.cl108
-rw-r--r--src/core/CL/kernels/CLL2NormalizeKernel.cpp110
-rw-r--r--src/core/CL/kernels/CLReductionOperationKernel.cpp139
5 files changed, 428 insertions, 0 deletions
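Taken together, the two new kernels implement an L2 normalization along the X axis: reduction_operation, built with -DOPERATION=square_sum, produces the sum of squares of a row, and l2_normalize then scales every element by the reciprocal square root of that sum, clamped from below by the kernel's epsilon argument. In formula form, with \varepsilon standing for that argument:

\[ \mathrm{out}_i = \frac{\mathrm{in}_i}{\sqrt{\max\left(\sum_j \mathrm{in}_j^2,\ \varepsilon\right)}} \]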
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index ce2cfef67a..03c9903d6d 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -204,6 +204,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "IYUV_to_RGB888_bt709", "color_convert.cl" },
{ "IYUV_to_RGBA8888_bt709", "color_convert.cl" },
{ "IYUV_to_YUV444_bt709", "color_convert.cl" },
+ { "l2_normalize", "l2_normalize.cl" },
{ "lktracker_stage0", "optical_flow_pyramid_lk.cl" },
{ "lktracker_stage1", "optical_flow_pyramid_lk.cl" },
{ "magnitude_phase", "magnitude_phase.cl" },
@@ -235,6 +236,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "pooling_layer_3", "pooling_layer.cl" },
{ "pooling_layer_3_optimized", "pooling_layer.cl" },
{ "pooling_layer_7", "pooling_layer.cl" },
+ { "reduction_operation", "reduction_operation.cl" },
{ "remap_nearest_neighbour", "remap.cl" },
{ "remap_bilinear", "remap.cl" },
{ "reshape_to_columns", "convolution_layer.cl" },
@@ -422,6 +424,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/integral_image.clembed"
},
{
+ "l2_normalize.cl",
+#include "./cl_kernels/l2_normalize.clembed"
+ },
+ {
"magnitude_phase.cl",
#include "./cl_kernels/magnitude_phase.clembed"
},
@@ -474,6 +480,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/pooling_layer.clembed"
},
{
+ "reduction_operation.cl",
+#include "./cl_kernels/reduction_operation.clembed"
+ },
+ {
"remap.cl",
#include "./cl_kernels/remap.clembed"
},
diff --git a/src/core/CL/cl_kernels/l2_normalize.cl b/src/core/CL/cl_kernels/l2_normalize.cl
new file mode 100644
index 0000000000..8d47631019
--- /dev/null
+++ b/src/core/CL/cl_kernels/l2_normalize.cl
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2016, 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+/** This kernel performs L2 normalization: it scales the input by the reciprocal square root of a precomputed sum of squares, clamped from below by epsilon.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: QS8/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] sum_ptr Pointer to the sum tensor (sum of squares of the input). Supported data types: same as @p src_ptr
+ * @param[in] sum_stride_x Stride of the sum tensor in X dimension (in bytes)
+ * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] epsilon Epsilon value
+ */
+__kernel void l2_normalize(
+ VECTOR_DECLARATION(src),
+ VECTOR_DECLARATION(sum),
+ VECTOR_DECLARATION(dst),
+ DATA_TYPE epsilon)
+{
+ Vector src = CONVERT_TO_VECTOR_STRUCT(src);
+ Vector sum = CONVERT_TO_VECTOR_STRUCT(sum);
+ Vector dst = CONVERT_TO_VECTOR_STRUCT(dst);
+
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ in = vload16(0, (__global DATA_TYPE *)src.ptr);
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ normalize_value = (VEC_DATA_TYPE(DATA_TYPE, 16))native_rsqrt(fmax(((__global DATA_TYPE *)sum.ptr)[0], epsilon));
+
+ vstore16(in * normalize_value, 0, (__global DATA_TYPE *)dst.ptr);
+}
\ No newline at end of file
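For reference, a minimal scalar C++ sketch of what the kernel above computes for a row, given the precomputed sum of squares it reads from sum_ptr[0]. The helper name and signature are made up for illustration and are not part of the patch; note also that the kernel uses native_rsqrt, so its results may differ from this reference in the last few bits.

#include <algorithm>
#include <cmath>
#include <cstddef>

// Scalar reference: scale every element of a row by 1 / sqrt(max(sum_sq, epsilon)),
// where sum_sq is the precomputed sum of squares the kernel reads from sum_ptr[0].
void l2_normalize_ref(const float *in, float *out, std::size_t n, float sum_sq, float epsilon)
{
    const float scale = 1.0f / std::sqrt(std::max(sum_sq, epsilon));
    for(std::size_t i = 0; i < n; ++i)
    {
        out[i] = in[i] * scale;
    }
}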
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
new file mode 100644
index 0000000000..d46a22600f
--- /dev/null
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2016, 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+/** Calculate square sum of a vector
+ *
+ * @param[in] input Pointer to the first pixel.
+ *
+ * @return square sum of vector.
+ */
+inline DATA_TYPE square_sum(__global const DATA_TYPE *input)
+{
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ in = vload16(0, input);
+
+ in *= in;
+
+ in.s01234567 += in.s89ABCDEF;
+ in.s0123 += in.s4567;
+ in.s01 += in.s23;
+
+ return (in.s0 + in.s1);
+}
+
+/** Calculate sum of a vector
+ *
+ * @param[in] input Pointer to the first pixel.
+ *
+ * @return sum of vector.
+ */
+inline DATA_TYPE sum(__global const DATA_TYPE *input)
+{
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ in = vload16(0, input);
+
+ in.s01234567 += in.s89ABCDEF;
+ in.s0123 += in.s4567;
+ in.s01 += in.s23;
+
+ return (in.s0 + in.s1);
+}
+
+/** This kernel performs reduction given an operation.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data size must be passed at compile time using -DDATA_SIZE e.g. -DDATA_SIZE=32
+ * @note The operation we want to perform must be passed at compile time using -DOPERATION e.g. -DOPERATION=square_sum
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] partial_sum_ptr Pointer to the partial sum tensor (one summed value per work-group). Supported data types: same as @p src_ptr
+ * @param[in] partial_sum_stride_x Stride of the partial sum tensor in X dimension (in bytes)
+ * @param[in] partial_sum_step_x partial_sum_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] partial_sum_offset_first_element_in_bytes The offset of the first element in the partial sum tensor
+ * @param[in] local_sums Local buffer for storing the partial sums
+ */
+__kernel void reduction_operation(
+ VECTOR_DECLARATION(src),
+ VECTOR_DECLARATION(partial_sum),
+ __local DATA_TYPE *local_sums)
+{
+ Vector src = CONVERT_TO_VECTOR_STRUCT(src);
+ Vector partial_sum = CONVERT_TO_VECTOR_STRUCT(partial_sum);
+
+ unsigned int lsize = get_local_size(0);
+ unsigned int lid = get_local_id(0);
+
+ local_sums[lid] = OPERATION((__global DATA_TYPE *)src.ptr);
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ // Perform parallel reduction
+ for(unsigned int i = lsize >> 1; i > 0; i >>= 1)
+ {
+ if(lid < i)
+ {
+ local_sums[lid] += local_sums[lid + i];
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+ }
+
+ if(lid == 0)
+ {
+ ((__global DATA_TYPE *)partial_sum.ptr + get_group_id(0))[0] = local_sums[0];
+ }
+}
\ No newline at end of file
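Again purely for illustration, a C++ sketch (a hypothetical helper, not library code) that mirrors what a single workgroup of this kernel does when built with -DOPERATION=square_sum: each of the lsize work items folds a 16-element chunk into one square sum, and the loop then halves the number of active entries until local_sums[0] holds the group's total, which the kernel writes out once per workgroup. It assumes lsize is a power of two, as it is with the local size of 8 picked by the host code later in this patch.

#include <cstddef>
#include <vector>

// Emulates one workgroup of reduction_operation with OPERATION=square_sum:
// each of the lsize "work items" folds a 16-element chunk into one square sum,
// then the active range is halved repeatedly until local_sums[0] holds the total.
// lsize must be a power of two and in must hold lsize * 16 elements.
float reduce_group_ref(const float *in, std::size_t lsize)
{
    std::vector<float> local_sums(lsize);
    for(std::size_t lid = 0; lid < lsize; ++lid)
    {
        float acc = 0.0f;
        for(std::size_t j = 0; j < 16; ++j)
        {
            const float v = in[lid * 16 + j];
            acc += v * v; // square_sum of the work item's 16-wide chunk
        }
        local_sums[lid] = acc;
    }
    for(std::size_t i = lsize >> 1; i > 0; i >>= 1) // tree reduction across the group
    {
        for(std::size_t lid = 0; lid < i; ++lid)
        {
            local_sums[lid] += local_sums[lid + i];
        }
    }
    return local_sums[0]; // the value the kernel writes to partial_sum[group_id]
}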
diff --git a/src/core/CL/kernels/CLL2NormalizeKernel.cpp b/src/core/CL/kernels/CLL2NormalizeKernel.cpp
new file mode 100644
index 0000000000..3e0758c980
--- /dev/null
+++ b/src/core/CL/kernels/CLL2NormalizeKernel.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLL2NormalizeKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/FixedPoint.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+CLL2NormalizeKernel::CLL2NormalizeKernel()
+ : _input(nullptr), _sum(nullptr), _output(nullptr), _axis(0), _epsilon(1e-12)
+{
+}
+
+void CLL2NormalizeKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, unsigned int axis, float epsilon)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+
+ // Sum and output tensor auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_ERROR_ON_MSG(axis > 0, "Unsupported reduction axis; only axis 0 is supported");
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+
+ _input = input;
+ _sum = sum;
+ _output = output;
+ _axis = axis;
+ _epsilon = epsilon;
+
+ const unsigned int num_elems_processed_per_iteration = 16;
+
+ // Set build options
+ std::set<std::string> build_opts;
+ build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+ build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("l2_normalize", build_opts));
+
+ // Set epsilon argument
+ unsigned int idx = num_arguments_per_1D_tensor() * 3;
+ _kernel.setArg<cl_float>(idx, _epsilon);
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+
+ update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, input->info()->valid_region());
+
+ ICLKernel::configure(win);
+}
+
+void CLL2NormalizeKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ Window window_sum(window);
+ window_sum.set(Window::DimX, Window::Dimension(0, 0, 0));
+
+ Window in_slice = window.first_slice_window_1D();
+ Window sum_slice = window_sum.first_slice_window_1D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_1D_tensor_argument(idx, _input, in_slice);
+ add_1D_tensor_argument(idx, _sum, sum_slice);
+ add_1D_tensor_argument(idx, _output, in_slice);
+ enqueue(queue, *this, in_slice);
+ }
+ while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(sum_slice));
+}
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
new file mode 100644
index 0000000000..7595d8e79b
--- /dev/null
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/FixedPoint.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+CLReductionOperationKernel::CLReductionOperationKernel()
+ : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE), _border_size()
+{
+}
+
+BorderSize CLReductionOperationKernel::border_size() const
+{
+ return _border_size;
+}
+
+void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+
+ // Output tensor auto initialization if not yet initialized
+ TensorShape output_shape{ input->info()->tensor_shape() };
+ output_shape.set(axis, 1);
+ auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_ERROR_ON_MSG(axis > 0, "Unsupported reduction axis; only axis 0 is supported");
+
+ const unsigned int num_elems_processed_per_iteration = 16;
+ const unsigned int border_width = ((input->info()->dimension(0) % 128) != 0) ? 128 - input->info()->dimension(0) % 128 : 0;
+
+ _input = input;
+ _output = output;
+ _reduction_axis = axis;
+ _op = op;
+ _lws_hint = cl::NDRange(8);
+ _border_size = BorderSize(0, border_width, 0, 0);
+
+ // Set build options
+ std::set<std::string> build_opts;
+ build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+ build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ if(is_data_type_fixed_point(input->info()->data_type()))
+ {
+ build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
+ }
+
+ switch(op)
+ {
+ case ReductionOperation::SUM_SQUARE:
+ build_opts.emplace(("-DOPERATION=square_sum"));
+ break;
+ case ReductionOperation::SUM:
+ build_opts.emplace(("-DOPERATION=sum"));
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported reduction operation");
+ }
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reduction_operation", build_opts));
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+
+ AccessWindowStatic input_access(input->info(), 0, 0, input->info()->dimension(0) + border_width, 1);
+ AccessWindowHorizontal output_access(output->info(), 0, 1);
+
+ update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, output->info()->valid_region());
+
+ ICLKernel::configure(win);
+}
+
+void CLReductionOperationKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ // Set out window
+ Window out_window(window);
+ out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+
+ // Get first input and output slices
+ Window in_slice = window.first_slice_window_1D();
+ Window out_slice = out_window.first_slice_window_1D();
+
+ // Reshape window
+ const unsigned int border_width = ((in_slice.x().end() % 128) != 0) ? 128 - in_slice.x().end() % 128 : 0;
+ in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), in_slice.x().end() + border_width, in_slice.x().step()));
+
+ // Set local sums buffer (size in bytes: one DATA_TYPE element per work item)
+ _kernel.setArg(num_arguments_per_1D_tensor() * 2, _lws_hint[0] * _input->info()->element_size(), nullptr);
+
+ do
+ {
+ unsigned int idx = 0;
+ add_1D_tensor_argument(idx, _input, in_slice);
+ add_1D_tensor_argument(idx, _output, out_slice);
+ enqueue(queue, *this, in_slice, _lws_hint);
+ }
+ while(window.slide_window_slice_1D(in_slice) && window.slide_window_slice_1D(out_slice));
+}
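Finally, a hypothetical host-side wiring of the two kernels, to show how the pieces in this diff fit together. This is a sketch under assumptions: it relies on the library's runtime CLTensor and CLScheduler classes (which live outside src/core/CL and are therefore not in this diffstat), and it uses a 128-element row so that a single workgroup of the reduction kernel (8 work items x 16 elements) produces the final sum directly and no right border is needed. Longer rows would require a further reduction of the per-workgroup partial sums, which is not shown here.

#include "arm_compute/core/CL/kernels/CLL2NormalizeKernel.h"
#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // One row of 128 floats: 8 work items x 16 elements = exactly one workgroup,
    // so the reduction writes a single (final) sum and needs no right border.
    CLTensor src, sum, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    sum.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));

    // Configure before allocating so any padding requested by the kernels is honoured
    CLReductionOperationKernel reduction;
    reduction.configure(&src, &sum, 0, ReductionOperation::SUM_SQUARE);
    CLL2NormalizeKernel l2_norm;
    l2_norm.configure(&src, &sum, &dst, 0, 1e-12f);

    src.allocator()->allocate();
    sum.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src here (e.g. via map()/unmap()) ...

    CLScheduler::get().enqueue(reduction);
    CLScheduler::get().enqueue(l2_norm);
    CLScheduler::get().sync();
    return 0;
}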