author    Manuel Bottini <manuel.bottini@arm.com>    2019-10-21 17:59:07 +0100
committer Manuel Bottini <manuel.bottini@arm.com>    2019-12-03 13:58:56 +0000
commit    7b9998d0fe1f98768b690ead10ebfa166d1b873d (patch)
tree      d3f6b81fb2e414a9e0f8ed9597eab27ef970d725
parent    f9179d393a07eb9eed753e315df79d22391906c6 (diff)
download  ComputeLibrary-7b9998d0fe1f98768b690ead10ebfa166d1b873d.tar.gz
COMPMID-1816: Use parallel reduction on 0 axis in CL ARG_MIN/ARG_MAX
Introducing new CLArgMinMax kernel

Change-Id: I0b8254207cc3859d19ceef9b6429cf5c1c586db0
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2202
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
-rw-r--r--  arm_compute/core/CL/CLHelpers.h  11
-rw-r--r--  arm_compute/core/CL/CLKernels.h  7
-rw-r--r--  arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h  94
-rw-r--r--  arm_compute/core/CL/kernels/CLReductionOperationKernel.h  19
-rw-r--r--  arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h  27
-rw-r--r--  arm_compute/runtime/CL/functions/CLReductionOperation.h  10
-rw-r--r--  arm_compute/runtime/Utils.h  13
-rw-r--r--  src/core/CL/CLHelpers.cpp  8
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp  8
-rw-r--r--  src/core/CL/cl_kernels/arg_min_max.cl  431
-rw-r--r--  src/core/CL/cl_kernels/helpers.h  13
-rw-r--r--  src/core/CL/cl_kernels/reduction_operation.cl  111
-rw-r--r--  src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp  283
-rw-r--r--  src/core/CL/kernels/CLReductionOperationKernel.cpp  27
-rw-r--r--  src/core/Utils.cpp  3
-rw-r--r--  src/runtime/CL/functions/CLArgMinMaxLayer.cpp  128
-rw-r--r--  src/runtime/CL/functions/CLReductionOperation.cpp  54
-rw-r--r--  src/runtime/Utils.cpp  17
-rw-r--r--  tests/validation/CL/ArgMinMax.cpp  36
19 files changed, 1098 insertions, 202 deletions
diff --git a/arm_compute/core/CL/CLHelpers.h b/arm_compute/core/CL/CLHelpers.h
index cd65eafc9c..7e549be989 100644
--- a/arm_compute/core/CL/CLHelpers.h
+++ b/arm_compute/core/CL/CLHelpers.h
@@ -190,5 +190,16 @@ bool preferred_dummy_work_items_support(const cl::Device &device);
* @return An opencl kernel
*/
cl::Kernel create_opencl_kernel(CLCoreRuntimeContext *ctx, const std::string &kernel_name, const CLBuildOptions &build_opts);
+
+/** Creates a suitable LWS hint object for parallel implementations. Sets the number of work-groups based on the input size.
+ * If the input width is smaller than 128, fewer than 8 threads are used.
+ *
+ * @param[in] input_dimension Number of elements along the dimension to apply the parallelization
+ * @param[in] vector_size     Size of the vector in OpenCL
+ *
+ * @return An LWS hint object
+ */
+cl::NDRange create_lws_hint_parallel_implementations(unsigned int input_dimension, unsigned int vector_size);
+
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLHELPERS_H__ */
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index c3c485db7c..78437beffb 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -21,13 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_CLKERNELS_H__
-#define __ARM_COMPUTE_CLKERNELS_H__
+#ifndef ARM_COMPUTE_CLKERNELS_H
+#define ARM_COMPUTE_CLKERNELS_H
/* Header regrouping all the CL kernels */
#include "arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h"
#include "arm_compute/core/CL/kernels/CLAccumulateKernel.h"
#include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
+#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLBatchConcatenateLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h"
@@ -160,4 +161,4 @@
#include "arm_compute/core/CL/kernels/CLYOLOLayerKernel.h"
#include "arm_compute/core/CL/kernels/ICLDepthwiseConvolutionLayer3x3Kernel.h"
-#endif /* __ARM_COMPUTE_CLKERNELS_H__ */
+#endif /* ARM_COMPUTE_CLKERNELS_H */
diff --git a/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h b/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
new file mode 100644
index 0000000000..7f4cfe3edc
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CLARGMINMAXLAYERKERNEL_H
+#define ARM_COMPUTE_CLARGMINMAXLAYERKERNEL_H
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for the reduction operation kernel
+ *
+ * @note The default data type for an uninitialized output tensor is
+ * signed 32-bit integer (S32). It is the user's responsibility to check
+ * that the results do not overflow because the indices are computed
+ * in unsigned 32-bit (U32).
+ */
+class CLArgMinMaxLayerKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLArgMinMaxLayerKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLArgMinMaxLayerKernel(const CLArgMinMaxLayerKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLArgMinMaxLayerKernel &operator=(const CLArgMinMaxLayerKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLArgMinMaxLayerKernel(CLArgMinMaxLayerKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLArgMinMaxLayerKernel &operator=(CLArgMinMaxLayerKernel &&) = default;
+ /** Default destructor */
+ ~CLArgMinMaxLayerKernel() = default;
+
+ /** Set the input and output tensors.
+ *
+ * @param[in] input Source tensor. Data types supported: S32/F16/F32.
+ * @param[in] prev_output Destination tensor of the previous iterations of @ref CLArgMinMaxLayerKernel. Data types supported: U32/S32
+ * Has to be nullptr for the first iteration
+ * @param[out] output Destination tensor. Data types supported: U32/S32
+ * Output will have the same number of dimensions as input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
+ * @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
+ */
+ void configure(const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLArgMinMaxLayerKernel.
+ *
+ * @param[in] input Source tensor info. Data types supported: S32/F16/F32.
+ * @param[in] prev_output Destination tensor info of the previous iterations. Data types supported: U32/S32
+ * Has to be nullptr for the first iteration
+ * @param[in] output Destination tensor info. Data types supported: U32/S32
+ * Output will have the same number of dimensions as input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
+ * @param[in] op Reduction operation to perform. Only ArgMin and ArgMax are supported.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ const ICLTensor *_prev_output;
+ ICLTensor *_output;
+ unsigned int _reduction_axis;
+ ReductionOperation _op;
+};
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CLARGMINMAXLAYERKERNEL_H */
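Note: a hedged sketch of how stages of this new kernel chain on axis 0 (tensor and variable names here are illustrative only; the actual orchestration, including sizing of the intermediate index tensors, is done by CLArgMinMaxLayer further down in this patch):

    // Illustrative only: two chained stages of CLArgMinMaxLayerKernel on axis 0 (ARG_IDX_MIN).
    // Tensor infos are assumed to be initialised beforehand by the calling function.
    CLTensor input, partial_indices_0, partial_indices_1;
    CLArgMinMaxLayerKernel stage0, stage1;

    // The first stage reads the source directly, so prev_output is nullptr.
    stage0.configure(&input, nullptr, &partial_indices_0, 0 /* axis */, ReductionOperation::ARG_IDX_MIN);
    // Later stages read the source again plus the indices produced by the previous stage.
    stage1.configure(&input, &partial_indices_0, &partial_indices_1, 0 /* axis */, ReductionOperation::ARG_IDX_MIN);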
diff --git a/arm_compute/core/CL/kernels/CLReductionOperationKernel.h b/arm_compute/core/CL/kernels/CLReductionOperationKernel.h
index 172ed8985a..1ed7e6e5aa 100644
--- a/arm_compute/core/CL/kernels/CLReductionOperationKernel.h
+++ b/arm_compute/core/CL/kernels/CLReductionOperationKernel.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H__
-#define __ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H__
+#ifndef ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H
+#define ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H
#include "arm_compute/core/CL/ICLKernel.h"
#include "arm_compute/core/Types.h"
@@ -32,11 +32,6 @@ namespace arm_compute
class ICLTensor;
/** Interface for the reduction operation kernel
- *
- * @note For ARG_MIN/ARG_MAX reduction, the default data type for an uninitialized
- * output tensor is signed 32-bit integer (S32). It is the user's responsibility
- * to check that the results do not overflow because the indices are computed
- * in unsigned 32-bit (U32).
*/
class CLReductionOperationKernel : public ICLKernel
{
@@ -57,10 +52,10 @@ public:
/** Set the input and output tensors.
*
* @param[in] input Source tensor. Data types supported: QASYMM8/S32/F16/F32.
- * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input, U32/S32 for ARG_MIX/ARG_MAX.
+ * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
- * @param[in] op Reduction operation to perform.
+ * @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
* @param[in] width (Optional) In case of x-axis we also need to provide the width of the input image.
*/
void configure(const ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, unsigned int width = 0);
@@ -68,10 +63,10 @@ public:
/** Static function to check if given info will lead to a valid configuration of @ref CLReductionOperationKernel.
*
* @param[in] input Source tensor info. Data types supported: QASYMM8/S32/F16/F32.
- * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p input, U32/S32 for ARG_MIX/ARG_MAX.
+ * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p input.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1,2,3
- * @param[in] op Reduction operation to perform.
+ * @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
* @param[in] width (Optional) In case of x-axis we also need to provide the width of the input image.
*
* @return a status
@@ -90,4 +85,4 @@ private:
BorderSize _border_size;
};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H__ */
+#endif /*ARM_COMPUTE_CLREDUCTIONOPERATIONKERNEL_H */
diff --git a/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h b/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h
index 1b465a4866..21cded0417 100644
--- a/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h
@@ -21,10 +21,13 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_CLARGMINMAXLAYER_H__
-#define __ARM_COMPUTE_CLARGMINMAXLAYER_H__
+#ifndef ARM_COMPUTE_CLARGMINMAXLAYER_H
+#define ARM_COMPUTE_CLARGMINMAXLAYER_H
+#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h"
+#include "arm_compute/core/CL/kernels/CLReshapeLayerKernel.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
@@ -33,7 +36,6 @@ namespace arm_compute
{
class ITensorInfo;
class ICLTensor;
-class CLReductionOperation;
/** Function to calculate the index of the minimum or maximum values in a
* tensor based on an axis.
@@ -53,19 +55,18 @@ public:
CLArgMinMaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
/** Set the input and output tensors.
*
- * @param[in] input Input source tensor, this could be written if @ref CLReductionOperation
- * manipulates its border for better performance. Data types supported: F16/F32.
+ * @param[in] input Input source tensor. Data types supported: F16/F32.
* @param[in] axis Axis to find max/min index.
* @param[out] output Output source tensor. Data types supported: U32/S32.
- * @param[in] op Operation to perform: min or max
+ * @param[in] op Reduction operation to perform. Operations supported: ARG_IDX_MAX, ARG_IDX_MIN
*/
- void configure(ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op);
+ void configure(const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op);
/** Static function to check if given info will lead to a valid configuration of @ref CLArgMinMaxLayer
*
* @param[in] input Input source tensor info. Data types supported: F16/F32.
* @param[in] axis Axis to find max/min index.
* @param[in] output Output source tensor info. Data types supported: U32/S32.
- * @param[in] op Operation to perform: min or max
+ * @param[in] op Reduction operation to perform. Operations supported: ARG_IDX_MAX, ARG_IDX_MIN
*
* @return a status
*/
@@ -75,7 +76,13 @@ public:
void run() override;
private:
- std::unique_ptr<CLReductionOperation> _reduction_function;
+ MemoryGroup _memory_group;
+ std::vector<CLTensor> _results_vector;
+ CLTensor _not_reshaped_output;
+ std::vector<CLArgMinMaxLayerKernel> _reduction_kernels_vector;
+ CLReshapeLayerKernel _reshape_kernel;
+ unsigned int _num_of_stages;
+ unsigned int _reduction_axis;
};
} // namespace arm_compute
-#endif /* __ARM_COMPUTE_CLARGMINMAXLAYER_H__ */
+#endif /* ARM_COMPUTE_CLARGMINMAXLAYER_H */
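Note: a minimal usage sketch of the function after this change (shape, data type, and variable names are illustrative, not part of the patch; the output info is auto-initialised to S32 by configure, as documented in the kernel header above):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Illustrative 2048x16 F32 input reduced along axis 0.
        CLTensor input, output;
        input.allocator()->init(TensorInfo(TensorShape(2048U, 16U), 1, DataType::F32));

        CLArgMinMaxLayer argmax;
        argmax.configure(&input, 0 /* axis */, &output, ReductionOperation::ARG_IDX_MAX);

        input.allocator()->allocate();
        output.allocator()->allocate();
        // ... fill input, then:
        argmax.run();
        return 0;
    }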
diff --git a/arm_compute/runtime/CL/functions/CLReductionOperation.h b/arm_compute/runtime/CL/functions/CLReductionOperation.h
index 405e1177fd..9e0bf03ffe 100644
--- a/arm_compute/runtime/CL/functions/CLReductionOperation.h
+++ b/arm_compute/runtime/CL/functions/CLReductionOperation.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_CLREDUCTIONOPERATION_H__
-#define __ARM_COMPUTE_CLREDUCTIONOPERATION_H__
+#ifndef ARM_COMPUTE_CLREDUCTIONOPERATION_H
+#define ARM_COMPUTE_CLREDUCTIONOPERATION_H
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/CL/kernels/CLReductionOperationKernel.h"
@@ -57,7 +57,7 @@ public:
* @param[in] input Source tensor. Data types supported: QASYMM8/F16/F32.
* @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0, 1, 2, 3
- * @param[in] op Reduction operation to perform.
+ * @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
* @param[in] keep_dims (Optional) Whether to keep the reduced dimension after the operation. Defaults to true.
*/
void configure(ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims = true);
@@ -67,7 +67,7 @@ public:
* @param[in] input Source tensor info. Data types supported: QASYMM8/F16/F32.
* @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0, 1, 2, 3
- * @param[in] op Reduction operation to perform.
+ * @param[in] op Reduction operation to perform. Operations supported: MEAN_SUM, PROD, SUM_SQUARE, SUM, MIN, MAX
* @param[in] keep_dims (Optional) Whether to keep the reduced dimension after the operation. Defaults to true.
*
* @return a status
@@ -92,4 +92,4 @@ private:
bool _is_reshape_required;
};
} // namespace arm_compute
-#endif /*__ARM_COMPUTE_CLREDUCTIONOPERATION_H__ */
+#endif /* ARM_COMPUTE_CLREDUCTIONOPERATION_H */
\ No newline at end of file
diff --git a/arm_compute/runtime/Utils.h b/arm_compute/runtime/Utils.h
index 15c0042a33..9a5b20eb26 100644
--- a/arm_compute/runtime/Utils.h
+++ b/arm_compute/runtime/Utils.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef __ARM_COMPUTE_RUNTIME_UTILS_H__
-#define __ARM_COMPUTE_RUNTIME_UTILS_H__
+#ifndef ARM_COMPUTE_RUNTIME_UTILS_H
+#define ARM_COMPUTE_RUNTIME_UTILS_H
#include "arm_compute/runtime/IRuntimeContext.h"
#include "arm_compute/runtime/Scheduler.h"
@@ -46,5 +46,12 @@ const std::string &string_from_scheduler_type(Scheduler::Type t);
* @param[in] hints Hints to use.
*/
void schedule_kernel_on_ctx(IRuntimeContext *ctx, ICPPKernel *kernel, const IScheduler::Hints &hints);
+
+/** Calculate number of stages for parallel implementations
+ *
+ * @param[in] input_x_dimension input tensor x dimension
+ * @param[in] axis axis to be used
+ */
+unsigned int calculate_number_of_stages_only_x_axis(size_t input_x_dimension, unsigned int axis);
} // namespace arm_compute
-#endif /* __ARM_COMPUTE_RUNTIME_UTILS_H__ */
+#endif /* ARM_COMPUTE_RUNTIME_UTILS_H */
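Note: the implementation of this helper lives in src/runtime/Utils.cpp (listed in the diffstat, not shown in this excerpt). A hedged, illustrative sketch of the idea, assuming each parallel stage covers blocks of 128 elements (16-wide vectors, 8 work-items per work-group):

    #include <cstddef>

    // Illustrative only; not the exact implementation from the patch.
    unsigned int calculate_number_of_stages_only_x_axis(size_t input_x_dimension, unsigned int axis)
    {
        if(axis != 0)
        {
            return 1; // Only the x-axis reduction is split into multiple parallel stages.
        }
        // One work-group covers up to 128 elements; additional stages merge the partial indices.
        const auto num_of_wg = static_cast<unsigned int>((input_x_dimension + 127) / 128);
        return (num_of_wg > 1) ? num_of_wg / 128 + 2 : 1;
    }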
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 28b1a3224f..9754bebd18 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -365,4 +365,12 @@ cl::Kernel create_opencl_kernel(CLCoreRuntimeContext *ctx, const std::string &ke
return static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
}
}
+
+cl::NDRange create_lws_hint_parallel_implementations(unsigned int input_dimension, unsigned int vector_size)
+{
+ const unsigned int width_leftover = input_dimension % vector_size;
+ const unsigned int border_width = (width_leftover != 0) ? vector_size - width_leftover : 0;
+ const unsigned int num_of_threads = ((input_dimension + border_width) / 16);
+ return cl::NDRange(std::min(8U, num_of_threads));
+}
} // namespace arm_compute
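Note: a worked example of the hint above: with input_dimension = 100 and vector_size = 16, width_leftover is 4, border_width is 12, and num_of_threads = (100 + 12) / 16 = 7, so the hint is cl::NDRange(7); once the padded width reaches 128 or more the hint saturates at cl::NDRange(8).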
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 5d5205439e..5b59094c81 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -150,6 +150,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "activation_layer", "activation_layer.cl" },
{ "activation_layer_quant", "activation_layer_quant.cl" },
{ "activation_layer_quant_f32", "activation_layer_quant.cl" },
+ { "arg_min_max_x", "arg_min_max.cl" },
+ { "arg_min_max_y", "arg_min_max.cl" },
+ { "arg_min_max_z", "arg_min_max.cl" },
+ { "arg_min_max_w", "arg_min_max.cl" },
{ "batch_to_space_nchw", "batch_to_space.cl" },
{ "batch_to_space_static_nchw", "batch_to_space.cl" },
{ "batch_to_space_nhwc", "batch_to_space.cl" },
@@ -585,6 +589,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/activation_layer_quant.clembed"
},
{
+ "arg_min_max.cl",
+#include "./cl_kernels/arg_min_max.clembed"
+ },
+ {
"batch_to_space.cl",
#include "./cl_kernels/batch_to_space.clembed"
},
diff --git a/src/core/CL/cl_kernels/arg_min_max.cl b/src/core/CL/cl_kernels/arg_min_max.cl
new file mode 100644
index 0000000000..3f75377636
--- /dev/null
+++ b/src/core/CL/cl_kernels/arg_min_max.cl
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(ARG_MAX)
+#define CONDITION_TO_USE(x, y) ISGREATER(x, y)
+#elif defined(ARG_MIN)
+#define CONDITION_TO_USE(x, y) ISLESS(x, y)
+#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+#error "Unsupported reduction operation!"
+#endif // defined(ARG_MAX)
+
+#if defined(DATA_TYPE_OUTPUT)
+#if defined(WIDTH)
+#if defined(ARG_MIN)
+#if defined(PREV_OUTPUT)
+/** Find the index of the minimum value of a vector, given the indices from a previous stage
+ *
+ * @param[in] input    Pointer to the first value.
+ * @param[in] prev_res Pointer to the indices produced by the previous stage.
+ * @return Index of the minimum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_min_prev_out(__global const DATA_TYPE *input, __global const DATA_TYPE_OUTPUT *prev_res, const int x_idx)
+{
+ int end_elem = (x_idx + 1) * 16;
+ if(end_elem > WIDTH)
+ {
+ end_elem = WIDTH - x_idx * 16;
+ }
+ DATA_TYPE_OUTPUT res = prev_res[0];
+ for(int x_v = 1; x_v < end_elem; ++x_v)
+ {
+ res = select(res, prev_res[x_v], *(input + prev_res[x_v]) < * (input + res));
+ }
+ return res;
+}
+#else // !defined(PREV_OUTPUT)
+/** Find the index of the minimum value of a vector
+ *
+ * @param[in] input Pointer to the first value.
+ * @param[in] x_idx Index of the work-item along the x-axis.
+ * @return Index of the minimum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_min(__global const DATA_TYPE *input, const int x_idx)
+{
+#if WIDTH < 16
+ DATA_TYPE_OUTPUT res = 0;
+ for(DATA_TYPE_OUTPUT x_v = res + 1; x_v < WIDTH; ++x_v)
+ {
+ res = select(res, x_v, *(input + x_v) < * (input + res));
+ }
+ return res;
+#else // WIDTH >= 16
+ int x_elem = x_idx * 16;
+ const int x_goback = select(0, 16 - WIDTH % 16, x_elem + 16 > WIDTH);
+ x_elem -= x_goback;
+
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ in = vload16(0, input - x_goback);
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ res = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+
+ VEC_DATA_TYPE(COND_DATA_TYPE, 8)
+ idx_sel = (in.s01234567 <= in.s89abcdef);
+ in.s01234567 = select(in.s89abcdef, in.s01234567, idx_sel);
+ res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, int8));
+
+ idx_sel.s0123 = (in.s0123 < in.s4567) || (in.s0123 == in.s4567 && CONVERT((res.s0123 < res.s4567), VEC_DATA_TYPE(COND_DATA_TYPE, 4)));
+ in.s0123 = select(in.s4567, in.s0123, idx_sel.s0123);
+ res.s0123 = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, int4));
+
+ idx_sel.s01 = (in.s01 < in.s23) || (in.s01 == in.s23 && CONVERT((res.s01 < res.s23), VEC_DATA_TYPE(COND_DATA_TYPE, 2)));
+ in.s01 = select(in.s23, in.s01, idx_sel.s01);
+ res.s01 = select(res.s23, res.s01, CONVERT(idx_sel.s01, int2));
+
+ idx_sel.s0 = (in.s0 < in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+ res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, int));
+
+ return res.s0 + x_elem;
+#endif // WIDTH < 16
+}
+#endif // defined(PREV_OUTPUT)
+#endif // defined(ARG_MIN)
+#if defined(ARG_MAX)
+#if defined(PREV_OUTPUT)
+/** Find the index of the maximum value of a vector, given the indices from a previous stage
+ *
+ * @param[in] input    Pointer to the first value.
+ * @param[in] prev_res Pointer to the indices produced by the previous stage.
+ * @return Index of the maximum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_max_prev_out(__global const DATA_TYPE *input, __global const DATA_TYPE_OUTPUT *prev_res, const int x_idx)
+{
+ int end_elem = (x_idx + 1) * 16;
+ if(end_elem > WIDTH)
+ {
+ end_elem = WIDTH - x_idx * 16;
+ }
+ DATA_TYPE_OUTPUT res = prev_res[0];
+ for(int x_v = 1; x_v < end_elem; ++x_v)
+ {
+ res = select(res, prev_res[x_v], *(input + prev_res[x_v]) > *(input + res));
+ }
+ return res;
+}
+#else // !defined(PREV_OUTPUT)
+/** Find the index of the maximum value of a vector
+ *
+ * @param[in] input Pointer to the first value.
+ * @param[in] x_idx Index of the work-item along the x-axis.
+ * @return Index of the maximum value.
+ */
+inline DATA_TYPE_OUTPUT arg_idx_max(__global const DATA_TYPE *input, const int x_idx)
+{
+#if WIDTH < 16
+ DATA_TYPE_OUTPUT res = 0;
+ for(DATA_TYPE_OUTPUT x_v = res + 1; x_v < WIDTH; ++x_v)
+ {
+ res = select(res, x_v, *(input + x_v) > *(input + res));
+ }
+ return res;
+#else // WIDTH >= 16
+ int x_elem = x_idx * 16;
+ const int x_goback = select(0, 16 - WIDTH % 16, x_elem + 16 > WIDTH);
+ x_elem -= x_goback;
+
+ VEC_DATA_TYPE(DATA_TYPE, 16)
+ in = vload16(0, input - x_goback);
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ res = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+
+ VEC_DATA_TYPE(COND_DATA_TYPE, 8)
+ idx_sel = (in.s01234567 >= in.s89abcdef);
+ in.s01234567 = select(in.s89abcdef, in.s01234567, idx_sel);
+ res.s01234567 = select(res.s89abcdef, res.s01234567, CONVERT(idx_sel, int8));
+
+ idx_sel.s0123 = (in.s0123 > in.s4567) || (in.s0123 == in.s4567 && CONVERT((res.s0123 < res.s4567), VEC_DATA_TYPE(COND_DATA_TYPE, 4)));
+ in.s0123 = select(in.s4567, in.s0123, idx_sel.s0123);
+ res.s0123 = select(res.s4567, res.s0123, CONVERT(idx_sel.s0123, int4));
+
+ idx_sel.s01 = (in.s01 > in.s23) || (in.s01 == in.s23 && CONVERT((res.s01 < res.s23), VEC_DATA_TYPE(COND_DATA_TYPE, 2)));
+ in.s01 = select(in.s23, in.s01, idx_sel.s01);
+ res.s01 = select(res.s23, res.s01, CONVERT(idx_sel.s01, int2));
+
+ idx_sel.s0 = (in.s0 > in.s1) || (in.s0 == in.s1 && CONVERT((res.s0 < res.s1), COND_DATA_TYPE));
+ res.s0 = select(res.s1, res.s0, CONVERT(idx_sel.s0, int));
+
+ return res.s0 + x_elem;
+#endif // WIDTH < 16
+}
+#endif // defined(PREV_OUTPUT)
+#endif // defined(ARG_MAX)
+
+/** This kernel performs a parallel reduction for the given operation on the x-axis.
+ *
+ * @note If the results of previous stages are passed, the flag PREV_OUTPUT has to be set using -DPREV_OUTPUT
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
+ * @note The arg_max flag must be passed at compile time using -DARG_MAX if we want to compute the ArgMax
+ * @note The arg_min flag must be passed at compile time using -DARG_MIN if we want to compute the ArgMin
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] prev_res_ptr (Optional) Pointer to previous results tensor. Supported data types: U32/S32
+ * @param[in] prev_res_stride_x (Optional) Stride of the output tensor in X dimension (in bytes)
+ * @param[in] prev_res_step_x (Optional) prev_res_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] prev_res_stride_y (Optional) Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] prev_res_step_y (Optional) prev_res_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] prev_res_offset_first_element_in_bytes (Optional) The offset of the first element in the previous results tensor
+ * @param[in] partial_res_ptr The local buffer to hold partial result values. Supported data types: U32/S32
+ * @param[in] partial_res_stride_x Stride of the output tensor in X dimension (in bytes)
+ * @param[in] partial_res_step_x partial_res_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] partial_res_stride_y Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] partial_res_step_y partial_res_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] partial_res_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] local_results Local buffer for storing the partial result
+ */
+__kernel void arg_min_max_x(
+ IMAGE_DECLARATION(src),
+#if defined(PREV_OUTPUT)
+ IMAGE_DECLARATION(prev_res),
+#endif // defined(PREV_OUTPUT)
+ IMAGE_DECLARATION(partial_res),
+ __local DATA_TYPE_OUTPUT *local_results)
+{
+#if defined(PREV_OUTPUT)
+ Image src = CONVERT_TO_IMAGE_STRUCT_NO_STEP(src);
+ Image prev_res = CONVERT_TO_IMAGE_STRUCT(prev_res);
+#else // !defined(PREV_OUTPUT)
+ Image src = CONVERT_TO_IMAGE_STRUCT(src);
+#endif // defined(PREV_OUTPUT)
+ Image partial_res = CONVERT_TO_IMAGE_STRUCT(partial_res);
+
+ unsigned int lsize = get_local_size(0);
+ unsigned int lid = get_local_id(0);
+
+ const uint x_idx = get_global_id(0);
+ const uint y_idx = get_global_id(1);
+ const __global DATA_TYPE *src_in_row = (const __global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + y_idx * src_step_y);
+
+ for(unsigned int y = 0; y < get_local_size(1); ++y)
+ {
+#if defined(ARG_MAX)
+#if defined(PREV_OUTPUT)
+ local_results[lid] = arg_idx_max_prev_out(src_in_row, (__global DATA_TYPE_OUTPUT *)offset(&prev_res, 0, y), x_idx);
+#else // !defined(PREV_OUTPUT)
+ local_results[lid] = arg_idx_max((__global DATA_TYPE *)offset(&src, 0, y), x_idx);
+#endif // defined(PREV_OUTPUT)
+#else // defined(ARG_MIN)
+#if defined(PREV_OUTPUT)
+ local_results[lid] = arg_idx_min_prev_out(src_in_row, (__global DATA_TYPE_OUTPUT *)offset(&prev_res, 0, y), x_idx);
+#else // !defined(PREV_OUTPUT)
+ local_results[lid] = arg_idx_min((__global DATA_TYPE *)offset(&src, 0, y), x_idx);
+#endif // defined(PREV_OUTPUT)
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+
+ barrier(CLK_LOCAL_MEM_FENCE);
+
+ // Perform parallel reduction
+ for(unsigned int i = lsize >> 1; i > 0; i >>= 1)
+ {
+ if(lid < i)
+ {
+ DATA_TYPE tmp0 = *(src_in_row + local_results[lid]);
+ DATA_TYPE tmp1 = *(src_in_row + local_results[lid + i]);
+#if defined(ARG_MAX)
+ local_results[lid] = select(
+ local_results[lid],
+ local_results[lid + i],
+ ((tmp0 == tmp1) && (local_results[lid + i] < local_results[lid])) || (tmp0 < tmp1));
+#else // defined(ARG_MIN)
+ local_results[lid] = select(
+ local_results[lid],
+ local_results[lid + i],
+ ((tmp0 == tmp1) && (local_results[lid + i] < local_results[lid])) || (tmp0 > tmp1));
+#endif // defined(ARG_MAX) || defined(ARG_MIN)
+ }
+ barrier(CLK_LOCAL_MEM_FENCE);
+ }
+
+ if(lid == 0)
+ {
+ ((__global DATA_TYPE_OUTPUT *)offset(&partial_res, get_group_id(0), y))[0] = local_results[0];
+ }
+ }
+}
+#endif // defined(WIDTH)
+
+#if defined(HEIGHT)
+/** This kernel performs the reduction on the y-axis.
+ *
+ * @note The input data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the output must be passed at compile time using -DDATA_TYPE_OUTPUT: e.g. -DDATA_TYPE_OUTPUT=uint
+ * @note The data type of the intermediate results must be passed at compile time using -DDATA_TYPE_PROMOTED: e.g. -DDATA_TYPE_PROMOTED=uint
+ * @note The height size must be passed at compile time using -DHEIGHT e.g. -DHEIGHT=128
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr Pointer to the destination tensor holding the computed indices. Supported data types: U32/S32
+ * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
+ */
+__kernel void arg_min_max_y(
+ IMAGE_DECLARATION(src),
+ IMAGE_DECLARATION(output))
+{
+ Image src = CONVERT_TO_IMAGE_STRUCT(src);
+ Image output = CONVERT_TO_IMAGE_STRUCT(output);
+
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ res = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ indx = 0;
+ for(unsigned int y = 1; y < HEIGHT; ++y)
+ {
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
+ indx = select(indx, y, cond_conv);
+ res = select(res, in, CONDITION_TO_USE(in, res));
+ }
+
+ // Store result
+ vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+}
+#endif // defined(HEIGHT)
+
+#if defined(DEPTH)
+/** This kernel performs the reduction on the z-axis.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the intermediate results must be passed at compile time using -DDATA_TYPE_PROMOTED: e.g. -DDATA_TYPE_PROMOTED=uint
+ * @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr Pointer to the destination tensor holding the computed indices. Supported data types: U32/S32
+ * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the output tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
+ */
+__kernel void arg_min_max_z(
+ TENSOR3D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ indx = 0;
+ for(DATA_TYPE_OUTPUT z = 1; z < DEPTH; ++z)
+ {
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
+ indx = select(indx, z, cond_conv);
+ res = select(res, in, CONDITION_TO_USE(in, res));
+ }
+
+ // Store result
+ vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+}
+#endif /* defined(DEPTH) */
+
+#if defined(BATCH) && defined(DEPTH)
+/** This kernel performs the reduction on the w-axis.
+ *
+ * @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
+ * @note The data type of the intermediate results must be passed at compile time using -DDATA_TYPE_PROMOTED: e.g. -DDATA_TYPE_PROMOTED=uint
+ * @note The batch size must be passed at compile time using -DBATCH e.g. -DBATCH=128
+ * @note The depth size must be passed at compile time using -DDEPTH e.g. -DDEPTH=128
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: S32/F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[in] output_ptr Pointer to the destination tensor holding the computed indices. Supported data types: U32/S32
+ * @param[in] output_stride_x Stride of the output tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the output tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the output tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_stride_w Stride of the output tensor in W dimension (in bytes)
+ * @param[in] output_step_w output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the source tensor
+ */
+__kernel void arg_min_max_w(
+ TENSOR4D_DECLARATION(input),
+ TENSOR4D_DECLARATION(output))
+{
+ Tensor4D input = CONVERT_TO_TENSOR4D_STRUCT(input, DEPTH);
+ Tensor4D output = CONVERT_TO_TENSOR4D_STRUCT(output, DEPTH);
+
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ res = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, 0)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ indx = 0;
+ for(DATA_TYPE_OUTPUT w = 1; w < BATCH; ++w)
+ {
+ VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
+ in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
+
+ VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16)
+ cond_conv = CONVERT(CONDITION_TO_USE(in, res), VEC_DATA_TYPE(DATA_TYPE_OUTPUT, 16));
+ indx = select(indx, w, cond_conv);
+ res = select(res, in, CONDITION_TO_USE(in, res));
+ }
+
+ // Store result
+ vstore16(indx, 0, (__global DATA_TYPE_OUTPUT *)output.ptr);
+}
+#endif /* defined(BATCH) && defined(DEPTH) */
+#endif // defined(DATA_TYPE_OUTPUT)
\ No newline at end of file
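Note: the loop marked "Perform parallel reduction" in arg_min_max_x above is a standard shared-memory tree reduction over candidate indices. A hedged scalar analogue of one work-group's argmin pass, illustrative only and assuming lsize is a power of two (in the kernel, barrier(CLK_LOCAL_MEM_FENCE) replaces the inner serial loop to synchronise work-items):

    // Illustrative only: serial analogue of the per-work-group argmin tree reduction.
    // local_results[0..lsize) holds one candidate index per work-item; row points at the source row.
    static unsigned int tree_argmin(const float *row, unsigned int *local_results, unsigned int lsize)
    {
        for(unsigned int i = lsize >> 1; i > 0; i >>= 1)
        {
            for(unsigned int lid = 0; lid < i; ++lid)
            {
                const float a = row[local_results[lid]];
                const float b = row[local_results[lid + i]];
                // Keep the smaller value; on ties prefer the lower index, matching the kernel's select().
                if(b < a || (b == a && local_results[lid + i] < local_results[lid]))
                {
                    local_results[lid] = local_results[lid + i];
                }
            }
            // In the OpenCL kernel a barrier synchronises the work-items here.
        }
        return local_results[0];
    }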
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index eaeaa6034d..ec5701dc69 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -266,6 +266,19 @@
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
+#if FLOAT_DATA_TYPE
+#define ISGREATER(x, y) isgreater(x, y)
+#define ISLESS(x, y) isless(x, y)
+#else // !FLOAT_DATA_TYPE
+#if defined(WIDTH)
+#define ISGREATER(x, y) (x > y) ? 1 : 0
+#define ISLESS(x, y) (x < y) ? 1 : 0
+#else // !defined(WIDTH)
+#define ISGREATER(x, y) select((int16)0, (int16)-1, x > y)
+#define ISLESS(x, y) select((int16)0, (int16)-1, x < y)
+#endif // defined(WIDTH)
+#endif // FLOAT_DATA_TYPE
+
#define VECTOR_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
diff --git a/src/core/CL/cl_kernels/reduction_operation.cl b/src/core/CL/cl_kernels/reduction_operation.cl
index 5a4bb9ff4c..451b962b01 100644
--- a/src/core/CL/cl_kernels/reduction_operation.cl
+++ b/src/core/CL/cl_kernels/reduction_operation.cl
@@ -23,19 +23,6 @@
*/
#include "helpers.h"
-#if FLOAT_DATA_TYPE
-#define ISGREATER(x, y) isgreater(x, y)
-#define ISLESS(x, y) isless(x, y)
-#else // !FLOAT_DATA_TYPE
-#if defined(WIDTH)
-#define ISGREATER(x, y) (x > y) ? 1 : 0
-#define ISLESS(x, y) (x < y) ? 1 : 0
-#else // !defined(WIDTH)
-#define ISGREATER(x, y) select((int16)0, (int16)-1, x > y)
-#define ISLESS(x, y) select((int16)0, (int16)-1, x < y)
-#endif // defined(WIDTH)
-#endif // FLOAT_DATA_TYPE
-
/** Calculate square sum of a vector
*
* @param[in] input Pointer to the first pixel.
@@ -164,7 +151,7 @@ __kernel void reduction_operation_x(
* @note The data type must be passed at compile time using -DDATA_TYPE: e.g. -DDATA_TYPE=float
* @note The width size must be passed at compile time using -DWIDTH e.g. -DWIDTH=128
* @note The product flag must be passed at compile time using -DPROD if we want to compute the product, otherwise sum will be used
- * @note In case of ARG_MIN and ARG_MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
+ * @note In case of MIN and MAX the condition data type must be passed at compile time using -DCOND_DATA_TYPE e.g. -DCOND_DATA_TYPE=short
*
* @param[in] src_ptr Pointer to the source tensor. Supported data types: S32/F16/F32 and QASYMM8 for operation MEAN
* @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -184,32 +171,19 @@ __kernel void reduction_operation_non_parallel_x(
DATA_TYPE_PROMOTED res = *((__global DATA_TYPE *)vector_offset(&src, 0));
-#if defined(ARG_MAX) || defined(ARG_MIN)
- uint indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
for(unsigned int x = 1; x < WIDTH; ++x)
{
DATA_TYPE_PROMOTED in = *((__global DATA_TYPE *)vector_offset(&src, x));
-#if defined(ARG_MAX)
- indx = select(indx, x, ISGREATER(in, res));
- res = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
-#elif defined(ARG_MIN)
- indx = select(indx, x, ISLESS(in, res));
- res = select(res, in, CONVERT(ISLESS(in, res), COND_DATA_TYPE));
-#elif defined(MIN)
+#if defined(MIN)
res = select(res, in, CONVERT(ISLESS(in, res), COND_DATA_TYPE));
#elif defined(MAX)
- res = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+ res = select(res, in, CONVERT(ISGREATER(in, res), COND_DATA_TYPE));
+#else // !(defined(MAX) || defined(MIN))
res += in;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(MAX) || defined(MIN)
}
// Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
- *((__global uint *)output.ptr) = indx;
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= WIDTH;
#endif // defined(MEAN)
@@ -218,7 +192,6 @@ __kernel void reduction_operation_non_parallel_x(
#else // defined(MIN) || defined(MAX)
*((__global uchar *)output.ptr) = convert_uchar(res);
#endif // defined(MIN) || defined(MAX)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif // defined(WIDTH)
@@ -255,27 +228,15 @@ __kernel void reduction_operation_y(
res *= res;
#endif // defined(SUM_SQUARE)
-#if defined(ARG_MAX) || defined(ARG_MIN)
- uint16 indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
for(unsigned int y = 1; y < HEIGHT; ++y)
{
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
in = CONVERT(vload16(0, (__global DATA_TYPE *)offset(&src, 0, y)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#if defined(ARG_MAX)
- uint16 cond_conv = CONVERT(ISGREATER(in, res), uint16);
- indx = select(indx, y, cond_conv);
- res = select(res, in, ISGREATER(in, res));
-#elif defined(ARG_MIN)
- uint16 cond_conv = CONVERT(ISLESS(in, res), uint16);
- indx = select(indx, y, cond_conv);
- res = select(res, in, ISLESS(in, res));
-#elif defined(MIN)
+#if defined(MIN)
res = select(res, in, ISLESS(in, res));
#elif defined(MAX)
- res = select(res, in, ISGREATER(in, res));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+ res = select(res, in, ISGREATER(in, res));
+#else // !(defined(MAX) || defined(MIN))
#if defined(SUM_SQUARE)
in *= in;
#endif // defined(SUM_SQUARE)
@@ -284,18 +245,14 @@ __kernel void reduction_operation_y(
#else // !defined(PROD)
res += in;
#endif // defined(PROD)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(MAX) || defined(MIN)
}
// Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
- vstore16(indx, 0, (__global uint *)output.ptr);
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= HEIGHT;
#endif // defined(MEAN)
vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif // defined(HEIGHT)
@@ -340,10 +297,6 @@ __kernel void reduction_operation_z(
res *= res;
#endif // defined(SUM_SQUARE)
-#if defined(ARG_MAX) || defined(ARG_MIN)
- uint16 indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
for(unsigned int z = 1; z < DEPTH; ++z)
{
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
@@ -354,19 +307,11 @@ __kernel void reduction_operation_z(
in1 = CONVERT(vload16(0, (__global DATA_TYPE *)tensor3D_offset(&input, 8, 0, z)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
#endif // defined(COMPLEX)
-#if defined(ARG_MAX)
- uint16 cond_conv = CONVERT(ISGREATER(in, res), uint16);
- indx = select(indx, z, cond_conv);
- res = select(res, in, ISGREATER(in, res));
-#elif defined(ARG_MIN)
- uint16 cond_conv = CONVERT(ISLESS(in, res), uint16);
- indx = select(indx, z, cond_conv);
- res = select(res, in, ISLESS(in, res));
-#elif defined(MIN)
+#if defined(MIN)
res = select(res, in, ISLESS(in, res));
#elif defined(MAX)
- res = select(res, in, ISGREATER(in, res));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+ res = select(res, in, ISGREATER(in, res));
+#else // !(defined(MAX) || defined(MIN))
#if defined(SUM_SQUARE)
in *= in;
#endif // defined(SUM_SQUARE)
@@ -377,14 +322,11 @@ __kernel void reduction_operation_z(
#if defined(COMPLEX)
res1 += in1;
#endif // defined(COMPLEX)
-#endif //defined(PROD)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(PROD)
+#endif // defined(MAX) || defined(MIN)
}
// Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
- vstore16(indx, 0, (__global uint *)output.ptr);
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= DEPTH;
#endif // defined(MEAN)
@@ -392,7 +334,6 @@ __kernel void reduction_operation_z(
#if defined(COMPLEX)
vstore16(CONVERT(res1, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)tensor3D_offset(&output, 8, 0, 0));
#endif // defined(COMPLEX)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif /* defined(DEPTH) */
@@ -438,28 +379,16 @@ __kernel void reduction_operation_w(
res *= res;
#endif // defined(SUM_SQUARE)
-#if defined(ARG_MAX) || defined(ARG_MIN)
- uint16 indx = 0;
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
-
for(unsigned int w = 1; w < BATCH; ++w)
{
VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16)
in = CONVERT(vload16(0, (__global DATA_TYPE *)tensor4D_offset(&input, 0, 0, 0, w)), VEC_DATA_TYPE(DATA_TYPE_PROMOTED, 16));
-#if defined(ARG_MAX)
- uint16 cond_conv = CONVERT(ISGREATER(in, res), uint16);
- indx = select(indx, w, cond_conv);
- res = select(res, in, ISGREATER(in, res));
-#elif defined(ARG_MIN)
- uint16 cond_conv = CONVERT(ISLESS(in, res), uint16);
- indx = select(indx, w, cond_conv);
- res = select(res, in, ISLESS(in, res));
-#elif defined(MIN)
+#if defined(MIN)
res = select(res, in, ISLESS(in, res));
#elif defined(MAX)
- res = select(res, in, ISGREATER(in, res));
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
+ res = select(res, in, ISGREATER(in, res));
+#else // !(defined(MAX) || defined(MIN))
#if defined(SUM_SQUARE)
in *= in;
#endif // defined(SUM_SQUARE)
@@ -468,17 +397,13 @@ __kernel void reduction_operation_w(
#else //!defined(PROD)
res += in;
#endif //defined(PROD)
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
+#endif // defined(MAX) || defined(MIN)
}
// Store result
-#if defined(ARG_MAX) || defined(ARG_MIN)
- vstore16(indx, 0, (__global uint *)output.ptr);
-#else // !(defined(ARG_MAX) || defined(ARG_MIN))
#if defined(MEAN)
res /= BATCH;
#endif // defined(MEAN)
vstore16(CONVERT(res, VEC_DATA_TYPE(DATA_TYPE, 16)), 0, (__global DATA_TYPE *)output.ptr);
-#endif // defined(ARG_MAX) || defined(ARG_MIN)
}
#endif /* defined(BATCH) && defined(DEPTH) */
diff --git a/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
new file mode 100644
index 0000000000..c8e87ba5ce
--- /dev/null
+++ b/src/core/CL/kernels/CLArgMinMaxLayerKernel.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLArgMinMaxLayerKernel.h"
+
+#include "arm_compute/core/AccessWindowStatic.h"
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace
+{
+constexpr unsigned int vector_size = 16;
+
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Only ARG_IDX_MAX and ARG_IDX_MIN are supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
+ }
+ if(prev_output != nullptr && prev_output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(prev_output, 1, DataType::U32, DataType::S32);
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(prev_output, output);
+ }
+ }
+
+ return Status{};
+}
+
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *prev_output, ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_UNUSED(op);
+ // Output tensor auto initialization if not yet initialized
+ TensorShape output_shape{ input->tensor_shape() };
+ output_shape.set(axis, 1);
+ DataType output_data_type = DataType::S32;
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
+
+ Window win = calculate_max_window((prev_output != nullptr) ? (*prev_output) : (*input), Steps(vector_size));
+ bool window_changed = false;
+
+ switch(axis)
+ {
+ case 0:
+ {
+ ITensorInfo *input_tensor_access = prev_output != nullptr ? prev_output : input;
+ AccessWindowStatic input_access(input_tensor_access, 0, 0, static_cast<int>(input_tensor_access->dimension(0)), 1);
+ AccessWindowHorizontal output_access(output, 0, 1);
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ }
+ break;
+ case 1:
+ case 2:
+ case 3:
+ {
+ AccessWindowHorizontal input_access(input, 0, vector_size);
+ AccessWindowHorizontal output_access(output, 0, vector_size);
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_tuple(err, win);
+}
+} // namespace
+
+CLArgMinMaxLayerKernel::CLArgMinMaxLayerKernel()
+ : _input(nullptr), _prev_output(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::ARG_IDX_MAX)
+{
+}
+
+void CLArgMinMaxLayerKernel::configure(const ICLTensor *input, const ICLTensor *prev_output, ICLTensor *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op));
+ auto win_config = validate_and_configure_window(input->info(), (prev_output != nullptr) ? prev_output->info() : nullptr, output->info(), axis, op);
+ ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+
+ _input = input;
+ _prev_output = prev_output;
+ _output = output;
+ _reduction_axis = axis;
+ _op = op;
+
+ // Set build options
+ CLBuildOptions build_opts;
+ const std::string data_type_promoted = get_cl_type_from_data_type(input->info()->data_type());
+
+ build_opts.add_option_if(_prev_output != nullptr, "-DPREV_OUTPUT");
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_PROMOTED=" + data_type_promoted);
+ build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
+ build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
+ build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
+ build_opts.add_option("-DCOND_DATA_TYPE=" + get_cl_select_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DDATA_TYPE_OUTPUT=" + get_cl_type_from_data_type(output->info()->data_type()));
+
+ // Create kernel
+ cl::NDRange lws_hint = CLKernelLibrary::get().default_ndrange();
+ std::string kernel_axis_name;
+ switch(axis)
+ {
+ case 0:
+ {
+ const ICLTensor *input_for_width = prev_output != nullptr ? _prev_output : _input;
+ build_opts.add_option("-DWIDTH=" + support::cpp11::to_string(input_for_width->info()->dimension(0)));
+
+ kernel_axis_name = "x";
+ lws_hint = create_lws_hint_parallel_implementations(input_for_width->info()->dimension(0), vector_size);
+ }
+ break;
+ case 1:
+ build_opts.add_option("-DHEIGHT=" + support::cpp11::to_string(input->info()->dimension(1)));
+ kernel_axis_name = "y";
+ break;
+ case 2:
+ build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
+ kernel_axis_name = "z";
+ break;
+ case 3:
+ build_opts.add_option("-DDEPTH=" + support::cpp11::to_string(input->info()->dimension(2)));
+ build_opts.add_option("-DBATCH=" + support::cpp11::to_string(input->info()->dimension(3)));
+ kernel_axis_name = "w";
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("arg_min_max_" + kernel_axis_name, build_opts.options()));
+
+ // Configure kernel window
+ ICLKernel::configure_internal(std::get<1>(win_config), lws_hint);
+}
+
+Status CLArgMinMaxLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *prev_output, const ITensorInfo *output, unsigned int axis, ReductionOperation op)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, prev_output, output, axis, op));
+ ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (prev_output != nullptr) ? prev_output->clone().get() : nullptr, output->clone().get(), axis, op)));
+ return Status{};
+}
+
+void CLArgMinMaxLayerKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ switch(_reduction_axis)
+ {
+ case 0:
+ {
+ // Set out window
+ Window out_window(window);
+ out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+
+ // Get first input and output slices
+ Window in_slice = window.first_slice_window_2D();
+ Window out_slice = out_window.first_slice_window_2D();
+
+ // Reshape window
+ const unsigned int border_width = ((in_slice.x().end() % vector_size) != 0) ? vector_size - in_slice.x().end() % vector_size : 0;
+ in_slice.set(Window::DimX, Window::Dimension(in_slice.x().start(), in_slice.x().end() + border_width, in_slice.x().step()));
+ const unsigned int num_tensors = _prev_output != nullptr ? 3 : 2;
+
+ // Set local results buffer
+ unsigned int local_res_size = lws_hint()[0] * _output->info()->element_size();
+ _kernel.setArg(num_arguments_per_2D_tensor() * num_tensors, local_res_size, nullptr);
+ do
+ {
+ unsigned int idx = 0;
+ add_2D_tensor_argument(idx, _input, in_slice);
+ if(_prev_output != nullptr)
+ {
+ add_2D_tensor_argument(idx, _prev_output, in_slice);
+ }
+ add_2D_tensor_argument(idx, _output, out_slice);
+ enqueue(queue, *this, in_slice, lws_hint());
+ }
+ while(window.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+ }
+ break;
+ case 1:
+ {
+ // Get first input and output slices
+ Window window_in{ window };
+ window_in.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(1), _input->info()->dimension(1)));
+ Window in_slice = window_in.first_slice_window_2D();
+ Window out_slice = window.first_slice_window_2D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_2D_tensor_argument(idx, _input, in_slice);
+ add_2D_tensor_argument(idx, _output, out_slice);
+ enqueue(queue, *this, in_slice, lws_hint());
+ }
+ while(window_in.slide_window_slice_2D(in_slice) && window.slide_window_slice_2D(out_slice));
+ }
+ break;
+ case 2:
+ {
+ // Get first input and output slices
+ Window window_in{ window };
+ window_in.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(2), _input->info()->dimension(2)));
+ Window in_slice = window_in.first_slice_window_3D();
+ Window out_slice = window.first_slice_window_3D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, in_slice);
+ add_3D_tensor_argument(idx, _output, out_slice);
+ enqueue(queue, *this, in_slice, lws_hint());
+ }
+ while(window_in.slide_window_slice_3D(in_slice) && window.slide_window_slice_3D(out_slice));
+ }
+ break;
+ case 3:
+ {
+ // Get first input and output slices
+ Window window_in{ window };
+ window_in.set(3, Window::Dimension(0, 1, 1));
+ Window in_slice = window_in.first_slice_window_4D();
+ Window out_slice = window.first_slice_window_4D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, in_slice);
+ add_4D_tensor_argument(idx, _output, out_slice);
+ enqueue(queue, *this, in_slice, lws_hint());
+ }
+ while(window_in.slide_window_slice_4D(in_slice) && window.slide_window_slice_4D(out_slice));
+ }
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+}
+} // namespace arm_compute
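On the x-axis path above the slice is widened to a multiple of the vector width before being enqueued, and a per-work-group scratch buffer is attached as the last kernel argument. The snippet below is an illustrative sketch of that arithmetic only: vector_size is defined in the kernel's anonymous namespace and is assumed to be 16 here, and the output is assumed to hold S32 indices (4 bytes per element).

#include <iostream>

int main()
{
    const unsigned int vector_size    = 16;                        // assumed value, see note above
    const unsigned int slice_end      = 100;                       // hypothetical in_slice.x().end()
    const unsigned int border_width   = (slice_end % vector_size != 0) ? vector_size - slice_end % vector_size : 0; // 12
    const unsigned int padded_end     = slice_end + border_width;  // 112, a multiple of vector_size
    const unsigned int lws0           = 8;                         // hypothetical lws_hint()[0]
    const unsigned int local_res_size = lws0 * 4;                  // 32 bytes of __local scratch per work-group
    std::cout << padded_end << " " << local_res_size << "\n";
    return 0;
}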
diff --git a/src/core/CL/kernels/CLReductionOperationKernel.cpp b/src/core/CL/kernels/CLReductionOperationKernel.cpp
index cbf3923243..91ee83e530 100644
--- a/src/core/CL/kernels/CLReductionOperationKernel.cpp
+++ b/src/core/CL/kernels/CLReductionOperationKernel.cpp
@@ -60,19 +60,12 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
ARM_COMPUTE_RETURN_ERROR_ON(op == ReductionOperation::MEAN_SUM && axis == 0 && width == 0 && input->data_type() != DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN, "Unsupported reduction operation; use CLArgMinMaxLayer");
if(output->total_size() != 0)
{
- if(op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8, "Not supported operation for QASYMM8");
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U32, DataType::S32);
- }
- else
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
- }
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
}
return Status{};
@@ -81,9 +74,8 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, unsigned int axis, ReductionOperation op)
{
// Output tensor auto initialization if not yet initialized
- const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX);
- const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, !is_arg_min_max);
- const DataType output_data_type = is_arg_min_max ? DataType::S32 : input->data_type();
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, true);
+ DataType output_data_type = input->data_type();
auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
const unsigned int num_elems_processed_per_iteration = (is_data_type_quantized(input->data_type()) && (axis == 0)) ? 1 : 16;
@@ -166,8 +158,6 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
build_opts.add_option_if(is_data_type_float(input->info()->data_type()), "-DFLOAT_DATA_TYPE");
build_opts.add_option_if(op == ReductionOperation::SUM_SQUARE, "-DSUM_SQUARE");
build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DMEAN");
- build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MAX, "-DARG_MAX");
- build_opts.add_option_if(op == ReductionOperation::ARG_IDX_MIN, "-DARG_MIN");
build_opts.add_option_if(op == ReductionOperation::PROD, "-DPROD");
build_opts.add_option_if(op == ReductionOperation::MIN, "-DMIN");
build_opts.add_option_if(op == ReductionOperation::MAX, "-DMAX");
@@ -182,8 +172,6 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
case ReductionOperation::MEAN_SUM:
build_opts.add_option(("-DOPERATION=sum"));
break;
- case ReductionOperation::ARG_IDX_MAX:
- case ReductionOperation::ARG_IDX_MIN:
case ReductionOperation::MIN:
case ReductionOperation::MAX:
break;
@@ -214,12 +202,9 @@ void CLReductionOperationKernel::configure(const ICLTensor *input, ICLTensor *ou
build_opts.add_option_if(op == ReductionOperation::MEAN_SUM, "-DWIDTH=" + support::cpp11::to_string(width));
const unsigned int width_leftover = input->info()->dimension(0) % border_val;
const unsigned int border_width = (width_leftover != 0) ? border_val - width_leftover : 0;
- const unsigned int num_of_threads = ((input->info()->dimension(0) + border_width) / 16);
kernel_axis_name = "x";
- // Set the number of WG based on the input size. If input width is < 128
- // we can use fewer threads than 8.
- lws_hint = cl::NDRange(std::min(8U, num_of_threads));
+ lws_hint = create_lws_hint_parallel_implementations(input->info()->dimension(0), border_val);
_border_size = BorderSize(0, border_width, 0, 0);
}
}
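The hand-rolled work-group sizing removed above is now shared with CLArgMinMaxLayerKernel through create_lws_hint_parallel_implementations. A minimal sketch of equivalent logic, reconstructed from the removed lines (the shipped helper may differ in detail; requires <algorithm> and the OpenCL C++ headers):

cl::NDRange create_lws_hint_parallel_implementations(unsigned int input_dimension, unsigned int vector_size)
{
    // Pad the dimension up to a multiple of the vector size, then cap the local
    // work size at 8 work-items (fewer when the padded width yields fewer chunks).
    const unsigned int width_leftover = input_dimension % vector_size;
    const unsigned int border_width   = (width_leftover != 0) ? vector_size - width_leftover : 0;
    const unsigned int num_of_threads = (input_dimension + border_width) / vector_size;
    return cl::NDRange(std::min(8U, num_of_threads));
}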
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index cbf6e48375..fa56118587 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -431,12 +431,11 @@ std::pair<unsigned int, unsigned int> arm_compute::scaled_dimensions(unsigned in
bool arm_compute::needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
{
- const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN);
const bool is_min_max = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
const bool is_quantized_type = is_data_type_quantized(dt);
const bool is_first_dim = (axis == 0);
- return !is_first_dim || is_arg_min_max || is_min_max || is_quantized_type;
+ return !is_first_dim || is_min_max || is_quantized_type;
}
#ifdef ARM_COMPUTE_ASSERTS_ENABLED
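With ARG_IDX_MAX/ARG_IDX_MIN handled entirely by CLArgMinMaxLayer, the predicate above forces the serial kernel only for non-x axes, MIN/MAX and quantized types. A few illustrative calls (the results follow directly from the expression above):

needs_serialized_reduction(ReductionOperation::SUM, DataType::F32, 0);     // false -> parallel x-axis path
needs_serialized_reduction(ReductionOperation::MAX, DataType::F32, 0);     // true  -> serial (MIN/MAX)
needs_serialized_reduction(ReductionOperation::SUM, DataType::QASYMM8, 0); // true  -> serial (quantized)
needs_serialized_reduction(ReductionOperation::SUM, DataType::F32, 2);     // true  -> serial (non-x axis)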
diff --git a/src/runtime/CL/functions/CLArgMinMaxLayer.cpp b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
index fd172d5f2c..4ac6d25d75 100644
--- a/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
+++ b/src/runtime/CL/functions/CLArgMinMaxLayer.cpp
@@ -23,33 +23,145 @@
*/
#include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"
-#include "arm_compute/runtime/CL/functions/CLReductionOperation.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/Utils.h"
namespace arm_compute
{
CLArgMinMaxLayer::CLArgMinMaxLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _reduction_function(support::cpp14::make_unique<CLReductionOperation>(std::move(memory_manager)))
+ : _memory_group(std::move(memory_manager)), _results_vector(), _not_reshaped_output(), _reduction_kernels_vector(), _reshape_kernel(), _num_of_stages(), _reduction_axis()
{
}
-void CLArgMinMaxLayer::configure(ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op)
+Status CLArgMinMaxLayer::validate(const ITensorInfo *input, int axis, const ITensorInfo *output, const ReductionOperation &op)
{
- _reduction_function->configure(input, output, axis, op, false);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Invalid reduction operation");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= static_cast<int>(TensorShape::num_max_dimensions), "Reduction axis greater than max number of dimensions");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
+ const unsigned int num_of_stages = calculate_number_of_stages_only_x_axis(input->dimension(0), axis);
+
+ DataType output_data_type = DataType::S32;
+ TensorInfo not_reshaped_output;
+ const auto input_num_channels = input->num_channels();
+ const auto input_qinfo = input->quantization_info();
+
+ if(output->total_size() != 0)
+ {
+ output_data_type = output->data_type();
+ const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, false));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output);
+ }
+
+ auto shape_before_reshape = input->tensor_shape();
+ shape_before_reshape.set(axis, 1);
+ auto initialize_tensorinfo = [](TensorInfo & ti, TensorShape shape, DataType data_type, int num_channels, QuantizationInfo qinfo)
+ {
+ ti.set_data_type(data_type).set_tensor_shape(shape).set_num_channels(num_channels).set_quantization_info(qinfo);
+ };
+
+ initialize_tensorinfo(not_reshaped_output, shape_before_reshape, output_data_type, input_num_channels, input_qinfo);
+
+ if(num_of_stages == 1)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, nullptr, &not_reshaped_output, axis, op));
+ }
+ else
+ {
+ // Create temporary tensor infos
+ std::vector<TensorInfo> sums_vector(num_of_stages - 1);
+
+ // Create intermediate tensor info
+ TensorShape shape{ input->tensor_shape() };
+
+ for(unsigned int i = 0; i < num_of_stages - 1; i++)
+ {
+ shape.set(0, ceil(shape.x() / 128.f));
+ sums_vector[i].set_data_type(input->data_type());
+ sums_vector[i].set_tensor_shape(shape);
+ sums_vector[i].set_num_channels(input->num_channels());
+ }
+
+ // Validate the first reduction stage
+ ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, nullptr, &sums_vector[0], axis, op));
+
+ // Validate the intermediate reduction stages
+ for(unsigned int i = 1; i < num_of_stages - 1; ++i)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, &sums_vector[i - 1], &sums_vector[i], axis, op));
+ }
+
+ // Validate the last reduction stage
+ const unsigned int last_stage = num_of_stages - 1;
+ ARM_COMPUTE_RETURN_ON_ERROR(CLArgMinMaxLayerKernel::validate(input, &sums_vector[last_stage - 1], &not_reshaped_output, axis, op));
+ }
+ ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayerKernel::validate(&not_reshaped_output, output));
+ return Status{};
}
-Status CLArgMinMaxLayer::validate(const ITensorInfo *input, int axis, const ITensorInfo *output, const ReductionOperation &op)
+void CLArgMinMaxLayer::configure(const ICLTensor *input, int axis, ICLTensor *output, const ReductionOperation &op)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(op != ReductionOperation::ARG_IDX_MAX && op != ReductionOperation::ARG_IDX_MIN, "Invalid operation");
- return CLReductionOperation::validate(input, output, axis, op, false);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ _num_of_stages = calculate_number_of_stages_only_x_axis(input->info()->dimension(0), axis);
+ _reduction_axis = axis;
+
+ const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
+ DataType output_data_type = (output->info()->data_type() == DataType::UNKNOWN) ? DataType::S32 : output->info()->data_type();
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
+
+ // Configure reduction operation kernels
+ _reduction_kernels_vector.resize(_num_of_stages);
+
+ _memory_group.manage(&_not_reshaped_output);
+ // Create temporary tensors
+ if(_num_of_stages == 1)
+ {
+ _reduction_kernels_vector[0].configure(input, nullptr, &_not_reshaped_output, axis, op);
+ }
+ else
+ {
+ _results_vector.resize(_num_of_stages - 1);
+ TensorShape shape{ input->info()->tensor_shape() };
+ for(unsigned int i = 0; i < _num_of_stages - 1; i++)
+ {
+ shape.set(0, ceil(shape.x() / 128.f));
+ _results_vector[i].allocator()->init(input->info()->clone()->set_tensor_shape(shape).set_data_type(output_data_type));
+ }
+
+ // Configure the first reduction stage
+ _memory_group.manage(&_results_vector[0]);
+ _reduction_kernels_vector[0].configure(input, nullptr, &_results_vector[0], axis, op);
+
+ // Configure the intermediate reduction stages
+ for(unsigned int i = 1; i < _num_of_stages - 1; ++i)
+ {
+ _memory_group.manage(&_results_vector[i]);
+ _reduction_kernels_vector[i].configure(input, &_results_vector[i - 1], &_results_vector[i], axis, op);
+ _results_vector[i - 1].allocator()->allocate();
+ }
+
+ // Configure the last reduction stage
+ const unsigned int last_stage = _num_of_stages - 1;
+ _reduction_kernels_vector[last_stage].configure(input, &_results_vector[last_stage - 1], &_not_reshaped_output, axis, op);
+ _results_vector[last_stage - 1].allocator()->allocate();
+ }
+ _reshape_kernel.configure(&_not_reshaped_output, output);
+ _not_reshaped_output.allocator()->allocate();
}
void CLArgMinMaxLayer::run()
{
- _reduction_function->run();
+ MemoryGroupResourceScope scope_mg(_memory_group);
+
+ for(unsigned int i = 0; i < _num_of_stages; ++i)
+ {
+ CLScheduler::get().enqueue(_reduction_kernels_vector[i], false);
+ }
+ CLScheduler::get().enqueue(_reshape_kernel, false);
}
} // namespace arm_compute
\ No newline at end of file
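A minimal usage sketch of the reworked function; tensor creation and allocation are assumed to happen elsewhere and the tensor names are hypothetical:

// input:  F32 tensor of shape (2560, 2, 2, 2); output: S32 indices of shape (2, 2, 2)
CLArgMinMaxLayer argminmax;
argminmax.configure(&input, /*axis=*/0, &output, ReductionOperation::ARG_IDX_MAX);
argminmax.run(); // enqueues the staged CLArgMinMaxLayerKernel(s) followed by the reshape kernel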
diff --git a/src/runtime/CL/functions/CLReductionOperation.cpp b/src/runtime/CL/functions/CLReductionOperation.cpp
index 3aa5a813b6..2f9a38601d 100644
--- a/src/runtime/CL/functions/CLReductionOperation.cpp
+++ b/src/runtime/CL/functions/CLReductionOperation.cpp
@@ -33,30 +33,11 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/Utils.h"
#include "support/ToolchainSupport.h"
namespace arm_compute
{
-namespace
-{
-unsigned int calculate_number_of_stages(const ITensorInfo *input, unsigned int axis)
-{
- // We need only 1 stage for all axis except x-axis and x-axis for QASYMM8.
- if(axis != 0 || (axis == 0 && is_data_type_quantized(input->data_type())))
- {
- return 1;
- }
- // Calculate number of WGs. 16 elements per thread, 8 threads per WG
- const unsigned int num_of_wg = ceil(input->dimension(0) / 128.f);
-
- // Calculate number of stages. First stage performs op and the rest reduction sum
- // depending on the size of the input. Last stage should have only 1 WG.
- const unsigned int num_of_stages = num_of_wg / 128 + 2;
-
- return num_of_stages;
-}
-} // namespace
-
CLReductionOperation::CLReductionOperation(std::shared_ptr<IMemoryManager> memory_manager)
: _memory_group(std::move(memory_manager)), _results_vector(), _reduction_kernels_vector(), _border_handlers_vector(), _reshape_kernel(), _op(), _num_of_stages(), _reduction_axis(), _is_serial(),
_is_reshape_required(false)
@@ -65,15 +46,15 @@ CLReductionOperation::CLReductionOperation(std::shared_ptr<IMemoryManager> memor
Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis >= TensorShape::num_max_dimensions, "Reduction axis greater than max number of dimensions");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(axis > 3, "Unsupported reduction axis");
- const unsigned int num_of_stages = calculate_number_of_stages(input, axis);
+ const unsigned int num_of_stages = calculate_number_of_stages_only_x_axis(input->dimension(0), axis);
const bool is_serial = needs_serialized_reduction(op, input->data_type(), axis);
- const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
- const bool is_reshape_required = !keep_dims || is_arg_min_max;
+ const bool is_reshape_required = !keep_dims;
- if(is_reshape_required)
+ if(is_reshape_required && output->total_size() != 0)
{
const TensorInfo expected_output_shape = output->clone()->set_tensor_shape(arm_compute::misc::shape_calculator::compute_reduced_shape(input->tensor_shape(), axis, keep_dims));
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&expected_output_shape, output);
@@ -86,7 +67,7 @@ Status CLReductionOperation::validate(const ITensorInfo *input, const ITensorInf
const auto input_data_type = input->data_type();
const auto input_num_channles = input->num_channels();
const auto input_qinfo = input->quantization_info();
- const auto output_data_type = is_arg_min_max ? DataType::S32 : output->data_type();
+ const auto output_data_type = output->data_type();
auto initialize_tensorinfo = [](TensorInfo & ti, TensorShape shape, DataType data_type, int num_channels, QuantizationInfo qinfo)
{
@@ -184,8 +165,7 @@ ICLTensor *CLReductionOperation::configure_intermediate_result_vector(ICLTensor
return output;
}
- auto intermediate_result_vector_size = _is_serial ? 1 : _num_of_stages;
- const auto is_arg_min_max = (_op == ReductionOperation::ARG_IDX_MAX || _op == ReductionOperation::ARG_IDX_MIN);
+ auto intermediate_result_vector_size = _is_serial ? 1 : _num_of_stages;
if(!_is_reshape_required)
{
@@ -206,30 +186,24 @@ ICLTensor *CLReductionOperation::configure_intermediate_result_vector(ICLTensor
v.allocator()->init(input->info()->clone()->set_tensor_shape(shape));
}
- if(is_arg_min_max)
- {
- _results_vector.back().info()->set_data_type(DataType::S32).set_is_resizable(true).reset_padding();
- }
-
return _is_reshape_required ? &_results_vector.back() : output;
}
void CLReductionOperation::configure(ICLTensor *input, ICLTensor *output, unsigned int axis, ReductionOperation op, bool keep_dims)
{
- _op = op;
- _num_of_stages = calculate_number_of_stages(input->info(), axis);
- _reduction_axis = axis;
- _is_serial = needs_serialized_reduction(op, input->info()->data_type(), axis);
- const bool is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
- _is_reshape_required = !keep_dims || is_arg_min_max;
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ _op = op;
+ _num_of_stages = calculate_number_of_stages_only_x_axis(input->info()->dimension(0), axis);
+ _reduction_axis = axis;
+ _is_serial = needs_serialized_reduction(op, input->info()->data_type(), axis);
+ _is_reshape_required = !keep_dims;
auto *output_internal = configure_intermediate_result_vector(input, output);
- // ArgMinMax might not give initialized output tensor, so initialize here.
if(_is_reshape_required)
{
const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(input->info()->tensor_shape(), axis, false);
- const auto output_data_type = is_arg_min_max ? DataType::S32 : input->info()->data_type();
+ const auto output_data_type = input->info()->data_type();
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape).set_data_type(output_data_type).reset_padding().set_is_resizable(true));
}
diff --git a/src/runtime/Utils.cpp b/src/runtime/Utils.cpp
index 70494be05c..2204ec11d7 100644
--- a/src/runtime/Utils.cpp
+++ b/src/runtime/Utils.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include <cmath>
#include <map>
#include <string>
@@ -61,4 +62,20 @@ void schedule_kernel_on_ctx(IRuntimeContext *ctx, ICPPKernel *kernel, const ISch
NEScheduler::get().schedule(kernel, hints);
}
}
+
+unsigned int calculate_number_of_stages_only_x_axis(size_t input_x_dimension, unsigned int axis)
+{
+ // We need only 1 stage for all axes except the x-axis
+ if(axis != 0)
+ {
+ return 1;
+ }
+ // Calculate number of WGs. 16 elements per thread, 8 threads per WG
+ const auto num_of_wg = static_cast<unsigned int>(ceil(input_x_dimension / 128.f));
+
+ // Calculate the number of stages. The first stage performs the operation and the remaining
+ // stages reduce the partial results, depending on the size of the input. The last stage should use only 1 WG.
+ const unsigned int num_of_stages = num_of_wg / 128 + 2;
+ return num_of_stages;
+}
} // namespace arm_compute
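Worked through for a few representative x-axis widths (illustrative arithmetic only, mirroring the helper above; 2560 and 517 are widths used in the ArgMinMax test shapes below):

#include <cmath>
#include <iostream>

unsigned int stages(size_t x, unsigned int axis)
{
    if(axis != 0) return 1;
    const auto num_of_wg = static_cast<unsigned int>(std::ceil(x / 128.f));
    return num_of_wg / 128 + 2;
}

int main()
{
    std::cout << stages(2560, 0)  << "\n"; // 2: one parallel stage plus the final stage
    std::cout << stages(517, 0)   << "\n"; // 2
    std::cout << stages(40000, 0) << "\n"; // 4: intermediate stages kick in for very wide inputs
    std::cout << stages(2560, 2)  << "\n"; // 1: non-x axes never need staging
    return 0;
}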
diff --git a/tests/validation/CL/ArgMinMax.cpp b/tests/validation/CL/ArgMinMax.cpp
index 5b2e6f34c6..275641cb35 100644
--- a/tests/validation/CL/ArgMinMax.cpp
+++ b/tests/validation/CL/ArgMinMax.cpp
@@ -42,6 +42,18 @@ namespace test
{
namespace validation
{
+namespace
+{
+const auto ArgMinMaxSmallDataset = framework::dataset::make("Shape",
+{
+ TensorShape{ 2U, 7U, 1U, 3U },
+ TensorShape{ 128U, 64U, 21U, 3U },
+ TensorShape{ 2560U, 2U, 2U, 2U },
+});
+
+const auto ArgMinMaxLargeDataset = framework::dataset::make("Shape",
+{ TensorShape{ 517U, 123U, 13U, 2U } });
+} // namespace
TEST_SUITE(CL)
TEST_SUITE(ArgMinMax)
@@ -98,7 +110,17 @@ TEST_SUITE(S32)
FIXTURE_DATA_TEST_CASE(RunSmall,
CLArgMinMaxValidationFixture<int32_t>,
framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::S32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+ combine(combine(combine(ArgMinMaxSmallDataset, framework::dataset::make("DataType", DataType::S32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge,
+ CLArgMinMaxValidationFixture<int32_t>,
+ framework::DatasetMode::NIGHTLY,
+ combine(combine(combine(ArgMinMaxLargeDataset, framework::dataset::make("DataType", DataType::S32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -110,7 +132,8 @@ TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall,
CLArgMinMaxValidationFixture<half>,
framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+ combine(combine(combine(ArgMinMaxSmallDataset, framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -119,7 +142,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
FIXTURE_DATA_TEST_CASE(RunLarge,
CLArgMinMaxValidationFixture<half>,
framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+ combine(combine(combine(ArgMinMaxLargeDataset, framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -130,7 +154,8 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall,
CLArgMinMaxValidationFixture<float>,
framework::DatasetMode::PRECOMMIT,
- combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+ combine(combine(combine(ArgMinMaxSmallDataset, framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -139,7 +164,8 @@ FIXTURE_DATA_TEST_CASE(RunSmall,
FIXTURE_DATA_TEST_CASE(RunLarge,
CLArgMinMaxValidationFixture<float>,
framework::DatasetMode::NIGHTLY,
- combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
+ combine(combine(combine(ArgMinMaxLargeDataset, framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })),
+ framework::dataset::make("Operation", { ReductionOperation::ARG_IDX_MIN, ReductionOperation::ARG_IDX_MAX })))
{
// Validate output
validate(CLAccessor(_target), _reference);