author    Giorgio Arena <giorgio.arena@arm.com>    2021-01-11 15:31:15 +0000
committer Giorgio Arena <giorgio.arena@arm.com>    2021-01-13 14:01:40 +0000
commit    3d3a01c2122278febd6e77ec255d2557d0005546 (patch)
tree      837f7eb76afed5b56af6d64db00ef69e122ee2d2 /src
parent    d23a251df7b248067e06d5559e985ae1c523be27 (diff)
download  ComputeLibrary-3d3a01c2122278febd6e77ec255d2557d0005546.tar.gz
Remove padding for CLArgMinMaxLayerKernel and fix CLRange mismatches
- Cast the destination pointer to (__global DATA_TYPE*) when VEC_SIZE == 1 in range.cl

Resolves: COMPMID-3906, COMPMID-4093

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: Ic0a334d98785ea434ed81f89dbe34e7674991f82
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4792
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/cl_kernels/arg_min_max.cl            105
-rw-r--r--  src/core/CL/cl_kernels/range.cl                    14
-rw-r--r--  src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp     66
3 files changed, 77 insertions, 108 deletions
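
The padding removal follows the leftover-vector scheme visible in the hunks below: the kernels are now compiled with -DVEC_SIZE and -DVEC_SIZE_LEFTOVER, each work-item's x offset is shifted back by the missing tail of the last vector and clamped at zero, and the first work-item stores only the leftover elements (the VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0 condition passed to STORE_VECTOR_SELECT). A minimal host-side C++ sketch of that arithmetic, with made-up sizes (dim_x = 37 is hypothetical, not taken from this change):

#include <algorithm>
#include <cstdio>

int main()
{
    const int dim_x     = 37;                                // hypothetical first tensor dimension
    const int vec_size  = 16;                                // -DVEC_SIZE
    const int leftover  = dim_x % vec_size;                  // -DVEC_SIZE_LEFTOVER = 5
    const int num_items = (dim_x + vec_size - 1) / vec_size; // work-items along x

    for(int gid = 0; gid < num_items; ++gid)
    {
        // Shift each work-item back by the missing tail of the last vector and
        // clamp at 0: the first item stores only the leftover elements, every
        // other item makes a full, in-bounds VEC_SIZE access, so the tensor no
        // longer needs right-hand padding.
        const int x_offs = std::max(gid * vec_size - (vec_size - leftover) % vec_size, 0);
        std::printf("gid=%d accesses elements [%d, %d)\n", gid, x_offs, x_offs + vec_size);
    }
    return 0;
}

With these numbers the three work-items access [0, 16), [5, 21) and [21, 37): the last full-width access ends exactly at the tensor boundary, which is why the static access windows and the extra padding can be dropped from the host-side code further down.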
diff --git a/src/core/CL/cl_kernels/arg_min_max.cl b/src/core/CL/cl_kernels/arg_min_max.cl
index 5184e0c5b8..b28b15b73e 100644
--- a/src/core/CL/cl_kernels/arg_min_max.cl
+++ b/src/core/CL/cl_kernels/arg_min_max.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,6 +23,11 @@
*/
#include "helpers.h"
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DATA_TYPE_OUTPUT) && defined(DATA_TYPE_SELECT)
+
+#define VEC_TYPE_IN VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+#define VEC_TYPE_OUT VEC_DATA_TYPE(DATA_TYPE_OUTPUT, VEC_SIZE)
+
#if defined(FLOAT_DATA_TYPE)
#define ISGREATER(x, y) isgreater(x, y)
#define ISLESS(x, y) isless(x, y)
@@ -31,8 +36,8 @@
#define ISGREATER(x, y) (x > y) ? 1 : 0
#define ISLESS(x, y) (x < y) ? 1 : 0
#else // !defined(WIDTH)
-#define ISGREATER(x, y) select((VEC_DATA_TYPE(DATA_TYPE_SELECT, 16))0, (VEC_DATA_TYPE(DATA_TYPE_SELECT, 16)) - 1, x > y)
-#define ISLESS(x, y) select((VEC_DATA_TYPE(DATA_TYPE_SELECT, 16))0, (VEC_DATA_TYPE(DATA_TYPE_SELECT, 16)) - 1, x < y)
+#define ISGREATER(x, y) select((VEC_DATA_TYPE(DATA_TYPE_SELECT, VEC_SIZE))0, (VEC_DATA_TYPE(DATA_TYPE_SELECT, VEC_SIZE)) - 1, x > y)
+#define ISLESS(x, y) select((VEC_DATA_TYPE(DATA_TYPE_SELECT, VEC_SIZE))0, (VEC_DATA_TYPE(DATA_TYPE_SELECT, VEC_SIZE)) - 1, x < y)
#endif // defined(WIDTH)
#endif // defined(FLOAT_DATA_TYPE)
@@ -44,7 +49,6 @@
#error "Unsupported reduction operation!"
#endif // defined(ARG_MAX)
-#if defined(DATA_TYPE_OUTPUT) && defined(DATA_TYPE_SELECT)
#if defined(WIDTH)
#if defined(ARG_MIN)
#if defined(PREV_OUTPUT)
@@ -293,16 +297,17 @@ __kernel void arg_min_max_x(
/** This kernel performs reduction on y-axis.
*
* @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
* @note The data type of the select results must be passed at compile time using -DDATA_TYPE_SELECT: e.g. -DDATA_TYPE_SELECT=int
* @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
- * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/S32/F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] output_ptr The local buffer to hold sumed values. Supported data types: U32/S32
* @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
@@ -311,30 +316,28 @@ __kernel void arg_min_max_x(
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
*/
__kernel void arg_min_max_y(
- IMAGE_DECLARATION(src),
+ IMAGE_DECLARATION(input),
IMAGE_DECLARATION(output))
{
- Image src = CONVERT_TO_IMAGE_STRUCT(src);
- Image output = CONVERT_TO_IMAGE_STRUCT(output);
+ const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE, 16));
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE_OUTPUT) + get_global_id(1) * output_stride_y;
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- indx = 0;
+ VEC_TYPE_IN res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_TYPE_IN);
+
+ VEC_TYPE_OUT indx0 = 0;
for(unsigned int y = 1; y < HEIGHT; ++y)
{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE, 16));
+ VEC_TYPE_IN in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + y * input_stride_y)), VEC_TYPE_IN);
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
- indx = select(indx, y, cond_conv);
- res = select(res, in, CONDITION_TO_USE(in, res));
+ VEC_TYPE_OUT cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_TYPE_OUT);
+ indx0 = select(indx0, y, cond_conv);
+ res = select(res, in, CONDITION_TO_USE(in, res));
}
// Store result
- vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+ STORE_VECTOR_SELECT(indx, DATA_TYPE_OUTPUT, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif // defined(HEIGHT)
@@ -342,6 +345,7 @@ __kernel void arg_min_max_y(
/** This kernel performs reduction on z-axis.
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note The data type of the select results must be passed at compile time using -DDATA_TYPE_SELECT: e.g. -DDATA_TYPE_SELECT=int
* @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
*
@@ -366,27 +370,25 @@ __kernel void arg_min_max_z(
TENSOR3D_DECLARATION(input),
TENSOR3D_DECLARATION(output))
{
- Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+ const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE, 16));
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE_OUTPUT) + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- indx = 0;
+ VEC_TYPE_IN res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_TYPE_IN);
+
+ VEC_TYPE_OUT indx0 = 0;
for(DATA_TYPE_OUTPUT z = 1; z < DEPTH; ++z)
{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE, 16));
+ VEC_TYPE_IN in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + z * input_stride_z)), VEC_TYPE_IN);
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
- indx = select(indx, z, cond_conv);
- res = select(res, in, CONDITION_TO_USE(in, res));
+ VEC_TYPE_OUT cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_TYPE_OUT);
+ indx0 = select(indx0, z, cond_conv);
+ res = select(res, in, CONDITION_TO_USE(in, res));
}
// Store result
- vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+ STORE_VECTOR_SELECT(indx, DATA_TYPE_OUTPUT, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(DEPTH) */
@@ -394,6 +396,7 @@ __kernel void arg_min_max_z(
/** This kernel performs reduction on w-axis.
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder between the input's first dimension and VEC_SIZE
* @note The data type of the select results must be passed at compile time using -DDATA_TYPE_SELECT: e.g. -DDATA_TYPE_SELECT=int
* @note The batch size must be passed at compile time using -DBATCH e.g. -DBATCH=128
* @note The depth size must be passed at compile time using -DBATCH e.g. -DDEPTH=128
@@ -423,27 +426,27 @@ __kernel void arg_min_max_w(
TENSOR4D_DECLARATION(input),
TENSOR4D_DECLARATION(output))
{
- Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT(input, DEPTH);
- Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
+ const int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
- VEC_DATA_TYPE(DATA_TYPE, 16)
- res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE, 16));
+ __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + get_global_id(1) * input_stride_y + (get_global_id(2) % DEPTH) * input_stride_z +
+ (get_global_id(2) / DEPTH) * input_stride_w;
+ __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE_OUTPUT) + get_global_id(1) * output_stride_y + (get_global_id(
+ 2) % DEPTH) * output_stride_z + (get_global_id(2) / DEPTH) * output_stride_w;
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- indx = 0;
+ VEC_TYPE_IN res = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr), VEC_TYPE_IN);
+
+ VEC_TYPE_OUT indx0 = 0;
for(DATA_TYPE_OUTPUT w = 1; w < BATCH; ++w)
{
- VEC_DATA_TYPE(DATA_TYPE, 16)
- in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE, 16));
+ VEC_TYPE_IN in = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(input_addr + w * input_stride_w)), VEC_TYPE_IN);
- VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
- cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
- indx = select(indx, w, cond_conv);
- res = select(res, in, CONDITION_TO_USE(in, res));
+ VEC_TYPE_OUT cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_TYPE_OUT);
+ indx0 = select(indx0, w, cond_conv);
+ res = select(res, in, CONDITION_TO_USE(in, res));
}
// Store result
- vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+ STORE_VECTOR_SELECT(indx, DATA_TYPE_OUTPUT, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);
}
#endif /* defined(BATCH) && defined(DEPTH) */
-#endif /* defined(DATA_TYPE_OUTPUT) && defined(DATA_TYPE_SELECT) */
\ No newline at end of file
+#endif // defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DATA_TYPE_OUTPUT) && defined(DATA_TYPE_SELECT)
\ No newline at end of file
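
STORE_VECTOR_SELECT is an existing ComputeLibrary store helper; its expansion is not part of this change, but the behaviour the kernels rely on here can be sketched roughly as follows (plain C++, illustrative names, not the real macro): write only VEC_SIZE_LEFTOVER elements when the partial-store condition holds, and the full VEC_SIZE vector otherwise.

#include <cstring>

// Behavioural sketch only: when the partial-store condition is true (a leftover
// exists and this is the designated work-item), write just the valid tail of
// the vector; otherwise write all vec_size elements.
template <typename T>
void store_vector_select_sketch(const T *vec, T *addr, int vec_size, int leftover, bool partial_store)
{
    const int n = partial_store ? leftover : vec_size;
    std::memcpy(addr, vec, n * sizeof(T));
}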
diff --git a/src/core/CL/cl_kernels/range.cl b/src/core/CL/cl_kernels/range.cl
index 467b962b0d..d25d10e207 100644
--- a/src/core/CL/cl_kernels/range.cl
+++ b/src/core/CL/cl_kernels/range.cl
@@ -61,7 +61,7 @@ __kernel void range(
DATA_TYPE seq;
seq = (DATA_TYPE)START + (DATA_TYPE)id * (DATA_TYPE)STEP;
- *dst_ptr = seq;
+ *(__global DATA_TYPE *)dst_ptr = seq;
#else // VECTOR_SIZE == 1
VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
seq0 = ((DATA_TYPE)START + (DATA_TYPE)id * (DATA_TYPE)STEP);
@@ -108,18 +108,18 @@ __kernel void range_quantized(
__global uchar *dst_ptr = out_ptr + out_offset_first_element_in_bytes + id * sizeof(DATA_TYPE);
#if VECTOR_SIZE == 1
float seq;
- seq = (float)START + (float)id * (float)STEP;
- seq = (DATA_TYPE)(int)(seq / ((float)SCALE_OUT) + (float)OFFSET_OUT);
- seq = max(0.0f, min(seq, 255.0f));
- *dst_ptr = CONVERT_SAT(CONVERT_DOWN(seq, int), uchar);
+ seq = (float)START + (float)id * (float)STEP;
+ seq = (DATA_TYPE)(int)(seq / ((float)SCALE_OUT) + (float)OFFSET_OUT);
+ seq = max(0.0f, min(seq, 255.0f));
+ *(__global DATA_TYPE *)dst_ptr = CONVERT_SAT(CONVERT_DOWN(seq, int), DATA_TYPE);
#else // VECTOR_SIZE == 1
VEC_DATA_TYPE(float, VECTOR_SIZE)
seq = (float)START + id * (float)STEP;
seq = seq + STEP_VEC;
seq = seq / ((VEC_DATA_TYPE(float, VECTOR_SIZE))((float)SCALE_OUT)) + ((VEC_DATA_TYPE(float, VECTOR_SIZE))((float)OFFSET_OUT));
seq = max((VEC_DATA_TYPE(float, VECTOR_SIZE))(0.0f), min(seq, (VEC_DATA_TYPE(float, VECTOR_SIZE))(255.0f)));
- VEC_DATA_TYPE(uchar, VECTOR_SIZE)
- res0 = CONVERT_SAT(CONVERT_DOWN(seq, VEC_DATA_TYPE(int, VECTOR_SIZE)), VEC_DATA_TYPE(uchar, VECTOR_SIZE));
+ VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)
+ res0 = CONVERT_SAT(CONVERT_DOWN(seq, VEC_DATA_TYPE(int, VECTOR_SIZE)), VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE));
STORE_VECTOR_SELECT(res, DATA_TYPE, dst_ptr, VECTOR_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
#endif // VECTOR_SIZE == 1
}
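
The range.cl hunks address the mismatches from the commit title: dst_ptr is declared as __global uchar *, so the old scalar paths stored the sequence value through a raw byte pointer (and, in the quantized kernel, converted it to uchar regardless of the actual data type), which goes wrong for any DATA_TYPE other than a single unsigned byte. A minimal C++ illustration of that kind of mismatch, with plain pointers standing in for the OpenCL address-space-qualified ones:

#include <cstdint>
#include <cstdio>

int main()
{
    std::int16_t  out[2] = { 0, 0 };
    std::uint8_t *dst    = reinterpret_cast<std::uint8_t *>(out); // plays the role of __global uchar *dst_ptr
    const std::int16_t seq = 0x0203;                              // some computed sequence value

    *dst = seq;                                       // old scalar path: value narrowed, one byte written
    *reinterpret_cast<std::int16_t *>(dst + 2) = seq; // fixed path: cast to the element type first

    std::printf("through uchar*: 0x%04x  through element type: 0x%04x\n",
                static_cast<unsigned>(out[0]), static_cast<unsigned>(out[1]));
    return 0;
}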
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
index 0e6fc6599c..8215d3ce07 100644
--- a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,6 @@
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
@@ -41,8 +40,6 @@ namespace arm_compute
{
namespace
{
-constexpr unsigned int vector_size = 16;
-
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
@@ -67,47 +64,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_outp
return Status{};
}
-
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *prev_output, ITensorInfo *output, unsigned int axis, ReductionOperation op)
-{
- ARM_COMPUTE_UNUSED(op);
- // Output tensor auto initialization if not yet initialized
- TensorShape output_shape{ input->tensor_shape() };
- output_shape.set(axis, 1);
- DataType output_data_type = DataType::S32;
- auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
-
- Window win = calculate_max_window((prev_output != nullptr) ? (*prev_output) : (*input), Steps(vector_size));
- bool window_changed = false;
-
- switch(axis)
- {
- case 0:
- {
- ITensorInfo *input_tensor_access = prev_output != nullptr ? prev_output : input;
- AccessWindowStatic input_access(input_tensor_access, 0, 0, static_cast<int>(input_tensor_access->dimension(0)), 1);
- AccessWindowHorizontal output_access(output, 0, 1);
- window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- }
- break;
- case 1:
- case 2:
- case 3:
- {
- AccessWindowHorizontal input_access(input, 0, vector_size);
- AccessWindowHorizontal output_access(output, 0, vector_size);
- window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
- }
- break;
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
-
- Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
- return std::make_tuple(err, win);
-}
} // namespace
CLArgMinMaxLayerKernel::CLArgMinMaxLayerKernel()
@@ -123,9 +79,14 @@ void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *
void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ TensorShape output_shape{ input->info()->tensor_shape() };
+ output_shape.set(axis, 1);
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(DataType::S32).reset_padding().set_is_resizable(true));
+
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op));
- auto win_config = validate_and_configure_window(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op);
- ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+
+ auto padding_info = get_padding_info({ input, prev_output, output });
_input = input;
_prev_output = prev_output;
@@ -134,10 +95,13 @@ void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context,
_op = op;
// Set build options
- CLBuildOptions build_opts;
+ const auto vector_size = (axis == 0) ? 16U : adjust_vec_size(16U, input->info()->dimension(0));
+ CLBuildOptions build_opts;
build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % vector_size));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vector_size));
build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
build_opts.add_option_if_else(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX", "-DARG_MIN");
build_opts.add_option("-DDATA_TYPE_OUTPUT=" + get_cl_type_from_data_type(output->info()->data_type()));
@@ -176,13 +140,15 @@ void CLArgMinMaxLayerKernel::configure(const CLCompileContext &compile_context,
_kernel = create_kernel(compile_context, "arg_min_max_" + kernel_axis_name, build_opts.options());
// Configure kernel window
- ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
+ Window win = calculate_max_window((prev_output != nullptr) ? (*prev_output->info()) : (*input->info()), Steps(vector_size));
+ ICLKernel::configure_internal(win, lws_hint);
+
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
Status CLArgMinMaxLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (prev_output != nullptr) ? prev_output->clone().get() : nullptr, output->clone().get(), axis, op)));
return Status{};
}
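
On the host side, the fixed 16-element vector size (and the padding it required) is replaced by a per-configuration width: 16 for the x-axis reduction, otherwise a width reduced to fit the input's first dimension, with the remainder exported as -DVEC_SIZE_LEFTOVER. A simplified sketch of the values that end up in the build options; cap_vec_size below is a stand-in for adjust_vec_size, and its halving loop is an assumption about that helper's intent, not its actual implementation.

#include <cstdio>

// Assumption: the vector width is shrunk until it no longer exceeds the
// tensor's first dimension (stand-in for arm_compute::adjust_vec_size).
static unsigned int cap_vec_size(unsigned int vec_size, unsigned int dim0)
{
    while(vec_size > dim0 && vec_size > 1)
    {
        vec_size /= 2;
    }
    return vec_size;
}

int main()
{
    const unsigned int dim0 = 6; // hypothetical input->info()->dimension(0)
    const unsigned int axis = 1; // reduction axis

    const unsigned int vector_size = (axis == 0) ? 16U : cap_vec_size(16U, dim0); // 4
    const unsigned int leftover    = dim0 % vector_size;                          // 2

    // These mirror the -DVEC_SIZE and -DVEC_SIZE_LEFTOVER options added in configure().
    std::printf("-DVEC_SIZE=%u -DVEC_SIZE_LEFTOVER=%u\n", vector_size, leftover);
    return 0;
}

Because the execution window is built with Steps(vector_size) over the unpadded shape and the configure() path now asserts has_padding_changed(padding_info), any accidental reintroduction of padding would fail at configuration time.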