authorMichele Di Giorgio <michele.digiorgio@arm.com>2018-10-24 12:20:19 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:55:45 +0000
commita1422fbf985c89ffebc8f5af8093e9cd987cfe29 (patch)
tree4536c22bb96859e8d80cc0c11e4d106b780b7539
parent33893c3e5a8d298f1a9fcc36ab89b73382fc1245 (diff)
downloadComputeLibrary-a1422fbf985c89ffebc8f5af8093e9cd987cfe29.tar.gz
COMPMID-1673: Collapse window in CLArithmeticAddition when one operand is a vector
When one of the operands is a vector, the kernel performs a broadcast addition and the window is not collapsed. This is a problem because it leads to a large number of enqueues, which increases the time spent in the OpenCL driver. This patch allows the window to be collapsed when one of the two operands is a vector. It also adds an LWS tuner to the kernel and reduces the number of elements processed per iteration to 8 to make better use of the cache.

Change-Id: I5f09ab0ddcffb3b7f9326a987c79a997b2d7fa8c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/155003
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Tested-by: bsgcomp <bsgcomp@arm.com>
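To see why the uncollapsed window is expensive, consider how run() submits work: one NDRange covers dimensions X/Y/Z, and every dimension above Z is iterated on the host, one enqueue per 3D slice. The sketch below is illustrative only; num_enqueues is a hypothetical helper, not a library function, and the shape numbers are made up.

    #include <cstddef>
    #include <vector>

    // Hypothetical helper: how many 3D slices the host loop in run()
    // would enqueue for a tensor of the given shape. Dimensions 0..2
    // are covered by a single NDRange; each dimension above Z
    // multiplies the number of host-side enqueues.
    std::size_t num_enqueues(const std::vector<std::size_t> &shape, bool collapsed)
    {
        if(collapsed)
        {
            return 1; // everything above DimZ is folded into Z
        }
        std::size_t n = 1;
        for(std::size_t d = 3; d < shape.size(); ++d)
        {
            n *= shape[d];
        }
        return n;
    }

    // e.g. a batched shape [64, 64, 16, 32] broadcast-added to a vector:
    // num_enqueues(..., false) == 32 driver calls; num_enqueues(..., true) == 1.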
-rw-r--r--src/core/CL/cl_kernels/arithmetic_op.cl14
-rw-r--r--src/core/CL/kernels/CLArithmeticAdditionKernel.cpp21
-rw-r--r--tests/validation/CL/ArithmeticAddition.cpp9
3 files changed, 31 insertions, 13 deletions
diff --git a/src/core/CL/cl_kernels/arithmetic_op.cl b/src/core/CL/cl_kernels/arithmetic_op.cl
index 9efb71b199..557615e7f2 100644
--- a/src/core/CL/cl_kernels/arithmetic_op.cl
+++ b/src/core/CL/cl_kernels/arithmetic_op.cl
@@ -33,11 +33,13 @@
#define DIV(x, y) (x) / (y)
+#if defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE)
/** This function adds two tensors.
*
* @attention The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN1, -DDATA_TYPE_IN2 and -DDATA_TYPE_OUT:
* e.g. -DDATA_TYPE_IN1=uchar -DDATA_TYPE_IN2=uchar -DDATA_TYPE_OUT=short
* @attention To perform saturating operation -DSATURATE has to be passed to the compiler otherwise wrapping policy will be used.
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
*
* @param[in] in1_ptr Pointer to the source tensor. Supported data types: U8/S16/F16/F32
* @param[in] in1_stride_x Stride of the source tensor in X dimension (in bytes)
@@ -75,14 +77,16 @@ __kernel void arithmetic_add(
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(out);
// Load values
- VEC_DATA_TYPE(DATA_TYPE_OUT, 16)
- in_a = CONVERT(vload16(0, (__global DATA_TYPE_IN1 *)in1.ptr), VEC_DATA_TYPE(DATA_TYPE_OUT, 16));
- VEC_DATA_TYPE(DATA_TYPE_OUT, 16)
- in_b = CONVERT(vload16(0, (__global DATA_TYPE_IN2 *)in2.ptr), VEC_DATA_TYPE(DATA_TYPE_OUT, 16));
+ VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+ in_a = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN1 *)in1.ptr), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+ VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
+ in_b = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE_IN2 *)in2.ptr), VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
// Calculate and store result
- vstore16(ADD(in_a, in_b), 0, (__global DATA_TYPE_OUT *)out.ptr);
+ VSTORE(VEC_SIZE)
+ (ADD(in_a, in_b), 0, (__global DATA_TYPE_OUT *)out.ptr);
}
+#endif /* defined(DATA_TYPE_IN1) && defined(DATA_TYPE_IN2) && defined(DATA_TYPE_OUT) && defined(VEC_SIZE) */
/** This function subtracts one tensor from another.
*
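The VLOAD(VEC_SIZE)/VSTORE(VEC_SIZE) calls above rely on the library's OpenCL helper macros, whose definitions are not part of this diff. The sketch below shows the standard two-level token-pasting pattern they are assumed to follow, which forces VEC_SIZE to expand to its numeric value before being pasted onto the built-in name.

    // Assumed shape of the helpers (the real definitions live in the
    // library's helpers.h). Two macro levels make VEC_SIZE expand
    // before token pasting.
    #define VLOAD_STR(size)  vload##size
    #define VLOAD(size)      VLOAD_STR(size)
    #define VSTORE_STR(size) vstore##size
    #define VSTORE(size)     VSTORE_STR(size)

    // With -DVEC_SIZE=8, VLOAD(VEC_SIZE)(0, ptr) expands to the OpenCL
    // built-in vload8(0, ptr), and VSTORE(VEC_SIZE)(x, 0, ptr) to
    // vstore8(x, 0, ptr).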
diff --git a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
index de14f00856..10d7fd4f2c 100644
--- a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
@@ -31,7 +31,7 @@ using namespace arm_compute;
namespace
{
-constexpr unsigned int num_elems_processed_per_iteration = 16;
+constexpr unsigned int num_elems_processed_per_iteration = 8;
Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ConvertPolicy policy)
{
@@ -140,6 +140,7 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
build_opts.emplace("-DDATA_TYPE_IN1=" + get_cl_type_from_data_type(input1->info()->data_type()));
build_opts.emplace("-DDATA_TYPE_IN2=" + get_cl_type_from_data_type(input2->info()->data_type()));
build_opts.emplace("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.emplace("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
if(is_data_type_quantized_asymmetric(input1->info()->data_type()))
{
build_opts.emplace("-DOFFSET_IN1=" + support::cpp11::to_string(input1->info()->quantization_info().offset));
@@ -155,6 +156,17 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
ICLKernel::configure_internal(win_config.second);
+
+ // Set config_id for enabling LWS tuning
+ _config_id = kernel_name;
+ _config_id += "_";
+ _config_id += lower_string(string_from_data_type(input1->info()->data_type()));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(output->info()->dimension(1));
+ _config_id += (policy == ConvertPolicy::WRAP) ? "_wrap_" : "_saturate_";
+ _config_id += lower_string(string_from_data_layout(input1->info()->data_layout()));
}
Status CLArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
@@ -176,8 +188,9 @@ void CLArithmeticAdditionKernel::run(const Window &window, cl::CommandQueue &que
const TensorShape &in_shape2 = _input2->info()->tensor_shape();
const TensorShape &out_shape = _output->info()->tensor_shape();
- bool can_collapse = true;
- if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
+ bool can_collapse = true;
+ const bool is_vector = in_shape1.num_dimensions() == 1 || in_shape2.num_dimensions() == 1;
+ if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1 && !is_vector)
{
can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
@@ -204,7 +217,7 @@ void CLArithmeticAdditionKernel::run(const Window &window, cl::CommandQueue &que
add_3D_tensor_argument(idx, _input2, slice_input2);
add_3D_tensor_argument(idx, _output, slice);
- enqueue(queue, *this, slice);
+ enqueue(queue, *this, slice, lws_hint());
collapsed.slide_window_slice_3D(slice_input1);
collapsed.slide_window_slice_3D(slice_input2);
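Factored out of the hunk above for readability, the new collapse decision reads as the predicate below. This is an illustration, not library code: Shape and DimZ stand in for arm_compute's TensorShape and Window::DimZ (which is 2).

    #include <algorithm>
    #include <cstddef>

    // Illustration only: the run() collapse decision as a free function.
    constexpr std::size_t DimZ = 2;

    struct Shape
    {
        std::size_t dims[6]{};
        std::size_t num_dimensions{};

        std::size_t total_size() const
        {
            std::size_t s = 1;
            for(std::size_t d = 0; d < num_dimensions; ++d)
            {
                s *= dims[d];
            }
            return s;
        }
    };

    bool can_collapse_window(const Shape &in1, const Shape &in2, const Shape &out)
    {
        // New in this patch: a 1D operand broadcasts along X only, so
        // the higher dimensions can always be folded together.
        const bool is_vector = (in1.num_dimensions == 1) || (in2.num_dimensions == 1);
        if(std::min(in1.total_size(), in2.total_size()) <= 1 || is_vector)
        {
            return true;
        }
        // Pre-existing rule: both operands must extend beyond Z, and
        // their dimensions from Z upwards must match.
        bool can_collapse = std::min(in1.num_dimensions, in2.num_dimensions) > DimZ;
        for(std::size_t d = DimZ; can_collapse && (d < out.num_dimensions); ++d)
        {
            can_collapse = (in1.dims[d] == in2.dims[d]);
        }
        return can_collapse;
    }

The config_id string built in configure() simply keys the LWS tuner's cache, so kernels that differ in data type, output width/height, convert policy, or data layout are tuned independently.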
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index e327769397..09f1b7c5a9 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -43,6 +43,7 @@ namespace validation
{
namespace
{
+constexpr unsigned int num_elems_processed_per_iteration = 8;
/** Input data sets **/
const auto ArithmeticAdditionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)), framework::dataset::make("DataType",
DataType::U8));
@@ -110,7 +111,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
validate(dst.info()->valid_region(), valid_region);
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
validate(ref_src1.info()->padding(), padding);
validate(ref_src2.info()->padding(), padding);
validate(dst.info()->padding(), padding);
@@ -146,7 +147,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
validate(dst.info()->valid_region(), valid_region);
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
validate(ref_src1.info()->padding(), padding);
validate(ref_src2.info()->padding(), padding);
validate(dst.info()->padding(), padding);
@@ -186,7 +187,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(frame
validate(dst.info()->valid_region(), valid_region);
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
validate(ref_src1.info()->padding(), padding);
validate(ref_src2.info()->padding(), padding);
validate(dst.info()->padding(), padding);
@@ -235,7 +236,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
validate(dst.info()->valid_region(), valid_region);
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
validate(ref_src1.info()->padding(), padding);
validate(ref_src2.info()->padding(), padding);
validate(dst.info()->padding(), padding);
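The tests above validate padding against the new step of 8 elements. Assuming PaddingCalculator::required_padding simply rounds the row width up to the next multiple of the processing step (a simplification; the real class also handles border sizes and other modes), the relationship is:

    #include <cstddef>

    // Assumed behaviour of the padding check above: right-hand padding
    // needed so a row of `width` elements is covered by whole steps of
    // `step` elements.
    std::size_t required_padding_right(std::size_t width, std::size_t step)
    {
        const std::size_t remainder = width % step;
        return remainder == 0 ? 0 : step - remainder;
    }

    // e.g. required_padding_right(20, 8) == 4, whereas the previous
    // step of 16 would have required 12; the smaller step also shrinks
    // the worst-case padding.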