 arm_compute/core/Types.h                             |   8
 arm_compute/runtime/CL/CLFunctions.h                 |   1
 arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h  |  59
 src/core/CL/CLKernelLibrary.cpp                      |   2
 src/core/CL/cl_kernels/reduction_operation.cl        | 144
 src/core/CL/kernels/CLReductionOperationKernel.cpp   |  36
 src/runtime/CL/functions/CLArgMinMaxLayer.cpp        |  48
 tests/validation/CL/ArgMinMax.cpp                    | 138
 tests/validation/fixtures/ArgMinMaxFixture.h         | 111
 tests/validation/reference/ReductionOperation.cpp    | 103
 utils/TypePrinter.h                                  |   6
 11 files changed, 599 insertions(+), 57 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 5ddd207100..7db2f5fddf 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -545,9 +545,11 @@ enum class NonLinearFilterFunction : unsigned
/** Available reduction operations */
enum class ReductionOperation
{
- SUM_SQUARE, /**< Sum of squares */
- SUM, /**< Sum */
- MEAN_SUM, /**< Mean of sum */
+ SUM_SQUARE, /**< Sum of squares */
+ SUM, /**< Sum */
+ MEAN_SUM, /**< Mean of sum */
+ ARG_IDX_MAX, /**< Index of the max value */
+ ARG_IDX_MIN /**< Index of the min value */
};
/** The normalization type used for the normalization layer */
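For orientation, here are the semantics of the two new values in scalar form. This is an illustrative sketch, not library code; the OpenCL kernels further down implement the vectorized equivalent, and the strict comparison is what makes ties keep the first occurrence.

```cpp
#include <cstddef>

// Illustrative scalar sketch of ARG_IDX_MAX (ARG_IDX_MIN flips the comparison).
// First occurrence wins on ties, matching the strict isgreater()/isless()
// comparisons used by the OpenCL kernels below.
static size_t arg_idx_max(const float *data, size_t n)
{
    size_t idx = 0;
    for(size_t i = 1; i < n; ++i)
    {
        if(data[i] > data[idx])
        {
            idx = i;
        }
    }
    return idx;
}
```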
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index d9b29ff2dc..780597ef07 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -28,6 +28,7 @@
#include "arm_compute/runtime/CL/functions/CLAbsoluteDifference.h"
#include "arm_compute/runtime/CL/functions/CLAccumulate.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
+#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"
#include "arm_compute/runtime/CL/functions/CLArithmeticAddition.h"
#include "arm_compute/runtime/CL/functions/CLArithmeticDivision.h"
#include "arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h"
diff --git a/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h b/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h
new file mode 100644
index 0000000000..b3a85948a8
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLARGMINMAXLAYER_H__
+#define __ARM_COMPUTE_CLARGMINMAXLAYER_H__
+
+#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Function to calculate the index of the minimum or maximum value in a tensor along a given axis. */
+class CLArgMinMaxLayer : public ICLSimpleFunction
+{
+public:
+ /** Set the input and output tensors.
+ *
+ * @param[in] input Input source tensor. Data types supported: F16/F32.
+ * @param[in] axis Axis along which to find the max/min index.
+ * @param[out] output Destination tensor. Data types supported: U32.
+ * @param[in] op Operation to perform: min or max.
+ */
+ void configure(const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArgMinMaxLayer
+ *
+ * @param[in] input Input source tensor info. Data types supported: F16/F32.
+ * @param[in] axis Axis along which to find the max/min index.
+ * @param[in] output Destination tensor info. Data types supported: U32.
+ * @param[in] op Operation to perform: min or max.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, int axis, const ITensorInfo *output, const ReductionOperation &op);
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLARGMINMAXLAYER_H__ */
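A minimal usage sketch of the new function. Shapes, the fill step, and the helper name run_argmax_example are illustrative; an OpenCL context is assumed to be set up once via CLScheduler:

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"

using namespace arm_compute;

void run_argmax_example()
{
    CLScheduler::get().default_init(); // set up context/queue once per process

    CLTensor src{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
    // The reduced axis collapses to 1 and the output carries indices, hence U32.
    dst.allocator()->init(TensorInfo(TensorShape(1U, 16U), 1, DataType::U32));

    CLArgMinMaxLayer argmax{};
    argmax.configure(&src, 0 /* axis */, &dst, ReductionOperation::ARG_IDX_MAX);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... map src and fill it with input data ...

    argmax.run();
    CLScheduler::get().sync();
}
```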
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index a9c4074310..f2b5d45e2c 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -370,7 +370,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "prior_box_layer_nchw", "prior_box_layer.cl" },
{ "quantization_layer", "quantization_layer.cl" },
{ "reduction_operation_x", "reduction_operation.cl" },
- { "reduction_operation_quantized_x", "reduction_operation.cl" },
+ { "reduction_operation_non_parallel_x", "reduction_operation.cl" },
{ "reduction_operation_y", "reduction_operation.cl" },
{ "reduction_operation_z", "reduction_operation.cl" },
{ "reduction_operation_w", "reduction_operation.cl" },
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
index d76e12ac04..d1f47beda7 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -60,7 +60,7 @@ inline DATA_TYPE sum(__global const DATA_TYPE *input)
return (in.s0 + in.s1);
}
-
+#if defined(OPERATION)
/** This kernel performs parallel reduction given an operation on x-axis.
*
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
@@ -120,13 +120,16 @@ __kernel void reduction_operation_x(
}
}
}
+#endif // defined(OPERATION)
#if defined(WIDTH)
-/** This kernel performs reduction on x-axis. (QASYMM8)
+/** This kernel performs reduction on x-axis. (non-parallel)
*
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
+ * @note For ARG_MIN and ARG_MAX, the condition data type must be passed at compile time using -DCOND_DATA_TYPE, e.g. -DCOND_DATA_TYPE=short
*
- * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: F16/F32 (QASYMM8 for the MEAN operation only)
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
@@ -135,33 +138,49 @@ __kernel void reduction_operation_x(
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
-__kernel void reduction_operation_quantized_x(
+__kernel void reduction_operation_non_parallel_x(
VECTOR_DECLARATION(src),
VECTOR_DECLARATION(output))
{
Vector src = CONVERT_TO_VECTOR_STRUCT(src);
Vector output = CONVERT_TO_VECTOR_STRUCT(output);
- uint res = 0;
+ DATA_TYPE_PROMOTED res = *((__global DATA_TYPE *)vector_offset(&src, 0));
- for(unsigned int x = 0; x < WIDTH; ++x)
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ uint indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+ for(unsigned int x = 1; x < WIDTH; ++x)
{
- res += *((__global uchar *)vector_offset(&src, x));
+ DATA_TYPE_PROMOTED in = *((__global DATA_TYPE *)vector_offset(&src, x));
+#if defined(ARG_MAX)
+ indx = select(indx, x, isgreater(in, res));
+ res = select(res, in, CONVERT(isgreater(in, res), COND_DATA_TYPE));
+#elif defined(ARG_MIN)
+ indx = select(indx, x, isless(in, res));
+ res = select(res, in, CONVERT(isless(in, res), COND_DATA_TYPE));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+ res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
+ // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ *((__global uint *)output.ptr) = indx;
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= WIDTH;
-#endif /* defined(MEAN) */
-
- // Store result
+#endif // defined(MEAN)
*((__global uchar *)output.ptr) = convert_uchar(res);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
-#endif /* defined(HEIGHT) */
+#endif /* defined(WIDTH) */
#if defined(HEIGHT)
/** This kernel performs reduction on y-axis.
*
- * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/F16/F32
@@ -185,24 +204,45 @@ __kernel void reduction_operation_y(
Image output = CONVERT_TO_IMAGE_STRUCT(output);
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res = 0;
+ res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+#if defined(SUM_SQUARE)
+ res *= res;
+#endif // defined(SUM_SQUARE)
+
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ uint16 indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
- for(unsigned int y = 0; y < HEIGHT; ++y)
+ for(unsigned int y = 1; y < HEIGHT; ++y)
{
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+#if defined(ARG_MAX)
+ uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
+ indx = select(indx, y, cond_conv);
+ res = select(res, in, isgreater(in, res));
+#elif defined(ARG_MIN)
+ uint16 cond_conv = CONVERT(isless(in, res), uint16);
+ indx = select(indx, y, cond_conv);
+ res = select(res, in, isless(in, res));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(SUM_SQUARE)
in *= in;
-#endif // SQRSUM
+#endif // defined(SUM_SQUARE)
res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
+ // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ vstore16(indx, 0, (__global uint *)output.ptr);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= HEIGHT;
-#endif /* defined(MEAN) */
-
- // Store result
+#endif // defined(MEAN)
vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif /* defined(HEIGHT) */
@@ -237,24 +277,46 @@ __kernel void reduction_operation_z(
Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res = 0;
+ res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
- for(unsigned int z = 0; z < DEPTH; ++z)
+#if defined(SUM_SQUARE)
+ res *= res;
+#endif // defined(SUM_SQUARE)
+
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ uint16 indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+ for(unsigned int z = 1; z < DEPTH; ++z)
{
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+#if defined(ARG_MAX)
+ uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
+ indx = select(indx, z, cond_conv);
+ res = select(res, in, isgreater(in, res));
+#elif defined(ARG_MIN)
+ uint16 cond_conv = CONVERT(isless(in, res), uint16);
+ indx = select(indx, z, cond_conv);
+ res = select(res, in, isless(in, res));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(SUM_SQUARE)
in *= in;
-#endif // SQRSUM
+#endif // defined(SUM_SQUARE)
res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
+ // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ vstore16(indx, 0, (__global uint *)output.ptr);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= DEPTH;
-#endif /* defined(MEAN) */
-
- // Store result
+#endif // defined(MEAN)
vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif /* defined(DEPTH) */
@@ -294,23 +356,45 @@ __kernel void reduction_operation_w(
Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
- res = 0;
+ res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
- for(unsigned int w = 0; w < BATCH; ++w)
+#if defined(SUM_SQUARE)
+ res *= res;
+#endif // defined(SUM_SQUARE)
+
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ uint16 indx = 0;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+ for(unsigned int w = 1; w < BATCH; ++w)
{
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+#if defined(ARG_MAX)
+ uint16 cond_conv = CONVERT(isgreater(in, res), uint16);
+ indx = select(indx, w, cond_conv);
+ res = select(res, in, isgreater(in, res));
+#elif defined(ARG_MIN)
+ uint16 cond_conv = CONVERT(isless(in, res), uint16);
+ indx = select(indx, w, cond_conv);
+ res = select(res, in, isless(in, res));
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(SUM_SQUARE)
in *= in;
-#endif // SQRSUM
+#endif // defined(SUM_SQUARE)
res += in;
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
+ // Store result
+#if defined(ARG_MAX) || defined(ARG_MIN)
+ vstore16(indx, 0, (__global uint *)output.ptr);
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= BATCH;
-#endif /* defined(MEAN) */
-
- // Store result
+#endif // defined(MEAN)
vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
 #endif /* defined(BATCH) && defined(DEPTH) */
\ No newline at end of file
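The kernels keep a running extremum res and index indx, updating both branch-free with select(). A scalar C++ restatement of the ARG_MAX step, for illustration only (the kernels apply it per lane of a 16-wide vector):

```cpp
#include <cstdint>

// Scalar restatement of the kernels' select()-based ARG_MAX update (sketch).
// The OpenCL code does the same per lane:
//   indx = select(indx, x, isgreater(in, res));
//   res  = select(res, in, CONVERT(isgreater(in, res), COND_DATA_TYPE));
static void arg_max_step(float in, uint32_t x, float &res, uint32_t &indx)
{
    const bool greater = (in > res); // isgreater(in, res)
    indx = greater ? x : indx;
    res  = greater ? in : res;
}
```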
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index ef46325e4d..f6dc4a8806 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -53,19 +53,29 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8, "Not supported operation for QASYMM8");
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32);
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
}
return Status{};
}
-std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis)
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
// Output tensor auto initialization if not yet initialized
TensorShape output_shape{ input->tensor_shape() };
output_shape.set(axis, 1);
- auto_init_if_empty(*output, output_shape, 1, input->data_type());
+ const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
+ DataType output_data_type = is_arg_min_max ? DataType::U32 : input->data_type();
+ auto_init_if_empty(*output, output_shape, 1, output_data_type);
const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -136,7 +146,7 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
// Set build options
CLBuildOptions build_opts;
std::string data_type_promoted = get_cl_type_from_data_type(input->info()->data_type());
- if(is_data_type_quantized(input->info()->data_type()) && axis != 0)
+ if(is_data_type_quantized(input->info()->data_type()))
{
data_type_promoted = "uint";
}
@@ -144,6 +154,8 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE=");
build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
+ build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
+ build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
switch(op)
{
@@ -154,6 +166,9 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
case ReductionOperation::MEAN_SUM:
build_opts.add_option(("-DOPERATION=sum"));
break;
+ case ReductionOperation::ARG_IDX_MAX:
+ case ReductionOperation::ARG_IDX_MIN:
+ break;
default:
ARM_COMPUTE_ERROR("Unsupported reduction operation");
}
@@ -161,11 +176,12 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
// Create kernel
cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
std::string kernel_axis_name;
+ const bool is_arg_op = (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN);
switch(axis)
{
case 0:
{
- if(!is_data_type_quantized(input->info()->data_type()))
+ if(!is_data_type_quantized(input->info()->data_type()) && !is_arg_op)
{
build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
const unsigned int width_leftover = input->info()->dimension(0) % border_val;
@@ -181,7 +197,8 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
else
{
build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input->info()->dimension(0)));
- kernel_axis_name = "quantized_x";
+ build_opts.add_option_if_else(_input->info()->data_type() == DataType::F32, "-DCOND_DATA_TYPE=int", "-DCOND_DATA_TYPE=short");
+ kernel_axis_name = "non_parallel_x";
}
}
break;
@@ -204,7 +221,7 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("reduction_operation_" + kernel_axis_name, build_opts.options()));
// Configure kernel window
- auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis);
+ auto win_config = validate_and_configure_window(_input->info(), _output->info(), axis, op);
ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
@@ -214,7 +231,7 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
Status CLReductionOperationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, unsigned int width)
{
ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, axis, op, width));
- ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis)));
+ ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get(), axis, op)));
return Status{};
}
@@ -224,12 +241,13 @@ void CLReductionOperationKernel::run(const Window &window, cl::CommandQueue &que
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+ const bool is_arg_op = (_op == ReductionOperation::ARG_IDX_MAX || _op == ReductionOperation::ARG_IDX_MIN);
switch(_reduction_axis)
{
case 0:
{
// We use parallel reduction only for non-quantized types
- if(!is_data_type_quantized(_input->info()->data_type()))
+ if(!is_data_type_quantized(_input->info()->data_type()) && !is_arg_op)
{
// Set out window
Window out_window(window);
diff --git a/src/runtime/CL/functions/CLArgMinMaxLayer.cpp b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
new file mode 100644
index 0000000000..a6393c57c1
--- /dev/null
+++ b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"
+
+#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+namespace arm_compute
+{
+void CLArgMinMaxLayer::configure(const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op)
+{
+ auto k = arm_compute::support::cpp14::make_unique<CLReductionOperationKernel>();
+ k->configure(input, output, axis, op);
+ _kernel = std::move(k);
+}
+
+Status CLArgMinMaxLayer::validate(const ITensorInfo *input, int axis, const ITensorInfo *output, const ReductionOperation &op)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Invalid operation");
+ return CLReductionOperationKernel::validate(input, output, axis, op);
+}
+} // namespace arm_compute
\ No newline at end of file
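A validate-before-configure sketch; tensor shapes and the helper name check_config are illustrative:

```cpp
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"

#include <iostream>

using namespace arm_compute;

void check_config()
{
    const TensorInfo in_info(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32);
    const TensorInfo out_info(TensorShape(32U, 16U, 1U, 2U), 1, DataType::U32);

    // Axis 2 collapses to 1 in the output; the op must be ARG_IDX_MIN/ARG_IDX_MAX.
    const Status status = CLArgMinMaxLayer::validate(&in_info, 2, &out_info, ReductionOperation::ARG_IDX_MAX);
    if(!bool(status))
    {
        std::cerr << status.error_description() << std::endl;
    }
}
```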
diff --git a/tests/validation/CL/ArgMinMax.cpp b/tests/validation/CL/ArgMinMax.cpp
new file mode 100644
index 0000000000..0b873945d3
--- /dev/null
+++ b/tests/validation/CL/ArgMinMax.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/SplitDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ArgMinMaxFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(ArgMinMax)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid axis
+ TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid output shape
+ TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32) // Invalid operation
+ }),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::U32),
+ TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Axis", { 4, 0, 2, 0 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MAX, ReductionOperation::ARG_IDX_MAX, ReductionOperation::ARG_IDX_MAX, ReductionOperation::MEAN_SUM })),
+ framework::dataset::make("Expected", { false, false, true, false })),
+ input_info, output_info, axis, operation, expected)
+{
+ const Status status = CLArgMinMaxLayer::validate(&input_info.clone()->set_is_resizable(false), axis, &output_info.clone()->set_is_resizable(false), operation);
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration,
+ framework::DatasetMode::ALL,
+ combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16, DataType::F32 })),
+ shape, data_type)
+{
+ // Create tensors
+ CLTensor ref_src = create_tensor<CLTensor>(shape, data_type);
+ CLTensor dst;
+
+ // Create and Configure function
+ CLArgMinMaxLayer arg_min_max_layer;
+ arg_min_max_layer.configure(&ref_src, 1, &dst, ReductionOperation::ARG_IDX_MAX);
+
+ // Validate valid region
+ TensorShape output_shape = shape;
+ output_shape.set(1, 1);
+ const ValidRegion valid_region = shape_to_valid_region(output_shape);
+ validate(dst.info()->valid_region(), valid_region);
+}
+
+template <typename T>
+using CLArgMinMaxValidationFixture = ArgMinMaxValidationFixture<CLTensor, CLAccessor, CLArgMinMaxLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLArgMinMaxValidationFixture<half>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ CLArgMinMaxValidationFixture<half>,
+ framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+ CLArgMinMaxValidationFixture<float>,
+ framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ CLArgMinMaxValidationFixture<float>,
+ framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // ArgMinMax
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/ArgMinMaxFixture.h b/tests/validation/fixtures/ArgMinMaxFixture.h
new file mode 100644
index 0000000000..5f5f85c104
--- /dev/null
+++ b/tests/validation/fixtures/ArgMinMaxFixture.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_ARG_MIN_MAX_FIXTURE
+#define ARM_COMPUTE_TEST_ARG_MIN_MAX_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ReductionOperation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArgMinMaxValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type, int axis, ReductionOperation op)
+ {
+ _target = compute_target(shape, data_type, axis, op);
+ _reference = compute_reference(shape, data_type, axis, op);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor)
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, 0);
+ }
+
+ TensorType compute_target(TensorShape &src_shape, DataType data_type, int axis, ReductionOperation op)
+ {
+ // Create tensors
+ TensorType src = create_tensor<TensorType>(src_shape, data_type, 1);
+ TensorType dst;
+
+ // Create and configure function
+ FunctionType arg_min_max_layer;
+ arg_min_max_layer.configure(&src, axis, &dst, op);
+
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ src.allocator()->allocate();
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(src));
+
+ // Compute function
+ arg_min_max_layer.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(TensorShape &src_shape, DataType data_type, int axis, ReductionOperation op)
+ {
+ // Create reference
+ SimpleTensor<T> src{ src_shape, data_type, 1 };
+
+ // Fill reference
+ fill(src);
+
+ TensorShape output_shape = src_shape;
+ output_shape.set(axis, 1);
+ return reference::reduction_operation<T>(src, output_shape, axis, op);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_ARG_MIN_MAX_FIXTURE */
diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index 2f103a6f65..37a9be86c0 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -38,10 +38,10 @@ namespace reference
{
namespace
{
-template <typename T>
-T reduce_operation(T *ptr, int reduce_elements, ReductionOperation op, int stride)
+template <typename T, typename OT>
+OT reduce_operation(const T *ptr, int reduce_elements, ReductionOperation op, int stride)
{
- using type = typename std::remove_cv<T>::type;
+ using type = typename std::remove_cv<OT>::type;
auto res = type(0);
if(std::is_integral<type>::value)
@@ -50,7 +50,31 @@ T reduce_operation(T *ptr, int reduce_elements, ReductionOperation op, int strid
for(int i = 0; i < reduce_elements; ++i)
{
auto elem = static_cast<uint32_t>(*(ptr + stride * i));
- int_res += (op == ReductionOperation::SUM_SQUARE) ? elem * elem : elem;
+
+ switch(op)
+ {
+ case ReductionOperation::ARG_IDX_MIN:
+ if(static_cast<uint32_t>(*(ptr + stride * static_cast<uint32_t>(res))) > elem)
+ {
+ res = static_cast<uint32_t>(i);
+ }
+ break;
+ case ReductionOperation::ARG_IDX_MAX:
+ if(static_cast<uint32_t>(*(ptr + stride * static_cast<uint32_t>(res))) < elem)
+ {
+ res = static_cast<uint32_t>(i);
+ }
+ break;
+ case ReductionOperation::SUM_SQUARE:
+ int_res += elem * elem;
+ break;
+ case ReductionOperation::MEAN_SUM:
+ case ReductionOperation::SUM:
+ int_res += elem;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Operation not supported");
+ }
}
if(op == ReductionOperation::MEAN_SUM && reduce_elements > 0)
{
@@ -63,7 +87,30 @@ T reduce_operation(T *ptr, int reduce_elements, ReductionOperation op, int strid
for(int i = 0; i < reduce_elements; ++i)
{
auto elem = *(ptr + stride * i);
- res += (op == ReductionOperation::SUM_SQUARE) ? elem * elem : elem;
+ switch(op)
+ {
+ case ReductionOperation::ARG_IDX_MIN:
+ if(*(ptr + stride * static_cast<uint32_t>(res)) > elem)
+ {
+ res = static_cast<uint32_t>(i);
+ }
+ break;
+ case ReductionOperation::ARG_IDX_MAX:
+ if(*(ptr + stride * static_cast<uint32_t>(res)) < elem)
+ {
+ res = static_cast<uint32_t>(i);
+ }
+ break;
+ case ReductionOperation::SUM_SQUARE:
+ res += elem * elem;
+ break;
+ case ReductionOperation::MEAN_SUM:
+ case ReductionOperation::SUM:
+ res += elem;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Operation not supported");
+ }
}
if(op == ReductionOperation::MEAN_SUM && reduce_elements > 0)
{
@@ -79,7 +126,9 @@ template <typename T>
SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op)
{
// Create reference
- SimpleTensor<T> dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
+ const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
+ DataType output_data_type = is_arg_min_max ? DataType::U32 : src.data_type();
+ SimpleTensor<T> dst{ dst_shape, output_data_type, 1, src.quantization_info() };
const unsigned int src_width = src.shape().x();
const unsigned int src_height = src.shape().y();
const unsigned int src_depth = src.shape().z();
@@ -94,8 +143,14 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
for(unsigned int du = 0; du < upper_dims; ++du)
{
const T *src_row_ptr = src.data() + du * reduce_elems;
- auto res = reduce_operation(src_row_ptr, reduce_elems, op, 1);
- dst[du] = res;
+ if(is_arg_min_max)
+ {
+ dst[du] = reduce_operation<T, uint32_t>(src_row_ptr, reduce_elems, op, 1);
+ }
+ else
+ {
+ dst[du] = reduce_operation<T, T>(src_row_ptr, reduce_elems, op, 1);
+ }
}
}
break;
@@ -109,8 +164,15 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
const int in_offset = du * src_height * src_width + x;
const int out_offset = du * src_width + x;
const T *src_row_ptr = src.data() + in_offset;
- auto res = reduce_operation(src_row_ptr, reduce_elems, op, src_width);
- dst[out_offset] = res;
+
+ if(is_arg_min_max)
+ {
+ dst[out_offset] = reduce_operation<T, uint32_t>(src_row_ptr, reduce_elems, op, src_width);
+ }
+ else
+ {
+ dst[out_offset] = reduce_operation<T, T>(src_row_ptr, reduce_elems, op, src_width);
+ }
}
}
}
@@ -127,8 +189,15 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
const int in_offset = du * src_depth * src_height * src_width + y * src_width + x;
const int out_offset = du * src_width * src_height + y * src_width + x;
const T *src_row_ptr = src.data() + in_offset;
- auto res = reduce_operation(src_row_ptr, reduce_elems, op, src_height * src_width);
- dst[out_offset] = res;
+
+ if(is_arg_min_max)
+ {
+ dst[out_offset] = reduce_operation<T, uint32_t>(src_row_ptr, reduce_elems, op, src_height * src_width);
+ }
+ else
+ {
+ dst[out_offset] = reduce_operation<T, T>(src_row_ptr, reduce_elems, op, src_height * src_width);
+ }
}
}
}
@@ -148,8 +217,14 @@ SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShap
const int in_offset = du * src_batch * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x;
const int out_offset = du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x;
const T *src_row_ptr = src.data() + in_offset;
- auto res = reduce_operation(src_row_ptr, reduce_elems, op, src_width * src_height * src_depth);
- dst[out_offset] = res;
+ if(is_arg_min_max)
+ {
+ dst[out_offset] = reduce_operation<T, uint32_t>(src_row_ptr, reduce_elems, op, src_width * src_height * src_depth);
+ }
+ else
+ {
+ dst[out_offset] = reduce_operation<T, T>(src_row_ptr, reduce_elems, op, src_width * src_height * src_depth);
+ }
}
}
}
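To make the stride handling and tie-breaking concrete, a standalone restatement of the reference loop with illustrative values:

```cpp
#include <cstdint>
#include <cstdio>

// Standalone restatement of the reference ARG_IDX_MAX loop above (illustrative).
// Each candidate is compared against the current best at ptr + stride * res;
// the strict '<' keeps the first occurrence on ties.
static uint32_t ref_arg_idx_max(const float *ptr, int reduce_elements, int stride)
{
    uint32_t res = 0;
    for(int i = 0; i < reduce_elements; ++i)
    {
        if(*(ptr + stride * static_cast<int>(res)) < *(ptr + stride * i))
        {
            res = static_cast<uint32_t>(i);
        }
    }
    return res;
}

int main()
{
    // 3x2 row-major tensor; reducing axis 1 (height) uses stride = width = 3.
    const float data[] = { 1.f, 9.f, 4.f,   // y = 0
                           7.f, 9.f, 2.f }; // y = 1
    for(int x = 0; x < 3; ++x)
    {
        std::printf("argmax along y at x=%d: %u\n", x, static_cast<unsigned>(ref_arg_idx_max(data + x, 2, 3)));
    }
    // Prints 1, 0, 0; the tie at x=1 keeps the first occurrence (y=0).
}
```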
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 91000cb702..2b81192a44 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -1342,6 +1342,12 @@ inline ::std::ostream &operator<<(::std::ostream &os, const ReductionOperation &
case ReductionOperation::MEAN_SUM:
os << "MEAN_SUM";
break;
+ case ReductionOperation::ARG_IDX_MAX:
+ os << "ARG_IDX_MAX";
+ break;
+ case ReductionOperation::ARG_IDX_MIN:
+ os << "ARG_IDX_MIN";
+ break;
default:
ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
}
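With the printer extended, the new enumerators can be streamed directly; a small illustrative check:

```cpp
#include "arm_compute/core/Types.h"
#include "utils/TypePrinter.h"

#include <iostream>

int main()
{
    std::cout << arm_compute::ReductionOperation::ARG_IDX_MAX << std::endl; // prints "ARG_IDX_MAX"
    std::cout << arm_compute::ReductionOperation::ARG_IDX_MIN << std::endl; // prints "ARG_IDX_MIN"
}
```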