author    Diego Lopez Recas <Diego.LopezRecas@arm.com>   2017-12-18 14:42:56 +0000
committer Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:45:00 +0000
commit    0021d750d66d199c411df00cdd8308c325f1fef3 (patch)
tree      b96e618977442a8aab335c136d369a958998d416 /src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
parent    5b6904b8d9cb5e8a343cde96fd5a8701f44dff90 (diff)
download  ComputeLibrary-0021d750d66d199c411df00cdd8308c325f1fef3.tar.gz
IVGCVSW-863 Broadcast support in CL/NEON Arithmetic Add
Also, added instrumentation to support generic tensor broadcasting for NEON and CL backends.

Change-Id: I1bc5747a286e1a4b464c209067581e103d473b9a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/114201
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
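For context, a minimal usage sketch of the broadcast behaviour this change enables. It assumes the runtime wrapper NEArithmeticAddition (which dispatches to this kernel) and the usual Tensor/TensorInfo setup; it is illustrative only and not part of this commit:

    #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        Tensor input1, input2, output;
        // input2 has size 1 in dimension 1 (y), so its single row is
        // replicated across the 4 rows of input1 by the kernel.
        input1.allocator()->init(TensorInfo(TensorShape(4U, 4U), 1, DataType::F32));
        input2.allocator()->init(TensorInfo(TensorShape(4U, 1U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(4U, 4U), 1, DataType::F32));

        // Configure before allocating, as configuration may adjust padding.
        NEArithmeticAddition add;
        add.configure(&input1, &input2, &output, ConvertPolicy::SATURATE);

        input1.allocator()->allocate();
        input2.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input1 and input2 ...
        add.run();
        return 0;
    }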
Diffstat (limited to 'src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp')
-rw-r--r--  src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp | 185
1 file changed, 106 insertions(+), 79 deletions(-)
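As a quick reference, the compatibility rule that the new validate_arguments enforces below (via TensorShape::broadcast_shape) can be sketched as follows; the example shapes are illustrative:

    // Shapes are broadcast compatible when, for each dimension, the sizes
    // match or one of them is 1; a size-1 dimension is replicated.
    // [4, 4] + [4, 1] -> [4, 4]   (single row replicated across rows)
    // [4, 4] + [4, 4] -> [4, 4]   (no broadcasting needed)
    // [4, 4] + [3, 4] -> incompatible: broadcast_shape() yields a shape with
    //                    total_size() == 0, which validate_arguments rejects.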
diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
index 8a98cf7cbc..a487090a98 100644
--- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,10 +46,12 @@ class Coordinates;
namespace
{
+constexpr unsigned int num_elems_processed_per_iteration = 16;
+
void add_wrap_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -64,8 +66,8 @@ void add_wrap_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out,
void add_saturate_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -80,8 +82,8 @@ void add_saturate_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *o
void add_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -93,8 +95,8 @@ void add_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, con
void add_saturate_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -163,8 +165,8 @@ inline float16x8x2_t vadd2q_f16(const float16x8x2_t &a, const float16x8x2_t &b)
void add_F16_F16_F16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -186,8 +188,8 @@ void add_F16_F16_F16(const ITensor *in1, const ITensor *in2, ITensor *out, const
void add_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -202,8 +204,8 @@ void add_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const
void add_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -218,8 +220,8 @@ void add_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out,
void add_saturate_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -234,8 +236,8 @@ void add_saturate_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *o
void add_wrap_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -257,8 +259,8 @@ void add_wrap_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, c
void add_saturate_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -292,8 +294,8 @@ inline void add_saturate_U8_S16_S16(const ITensor *input1, const ITensor *input2
void add_wrap_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -325,8 +327,8 @@ void add_wrap_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, co
void add_saturate_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -356,50 +358,84 @@ void add_saturate_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out
input1, input2, output);
}
-inline Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ConvertPolicy policy)
{
ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- if(is_data_type_fixed_point(input1->data_type()) || is_data_type_fixed_point(input2->data_type()) || is_data_type_fixed_point(output->data_type()))
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+
+ const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
+
+ if(is_data_type_fixed_point(input1.data_type()) || is_data_type_fixed_point(input2.data_type()))
{
- // Check that all data types are the same and all fixed-point positions are the same
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &input2);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(input1->data_type() == DataType::QS8 && input2->data_type() == DataType::QS8 && output->data_type() == DataType::QS8)
- && !(input1->data_type() == DataType::U8 && input2->data_type() == DataType::U8 && output->data_type() == DataType::U8)
- && !(input1->data_type() == DataType::U8 && input2->data_type() == DataType::U8 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::U8 && input2->data_type() == DataType::S16 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::S16 && input2->data_type() == DataType::U8 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::QS16 && input2->data_type() == DataType::QS16 && output->data_type() == DataType::QS16)
- && !(input1->data_type() == DataType::S16 && input2->data_type() == DataType::S16 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::F32 && input2->data_type() == DataType::F32 && output->data_type() == DataType::F32)
- && !(input1->data_type() == DataType::F16 && input2->data_type() == DataType::F16 && output->data_type() == DataType::F16),
- "You called addition with the wrong image formats");
+ // Validate in case of configured output
+ if(output.total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ !(input1.data_type() == DataType::QS8 && input2.data_type() == DataType::QS8 && output.data_type() == DataType::QS8)
+ && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::U8)
+ && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::QS16 && input2.data_type() == DataType::QS16 && output.data_type() == DataType::QS16)
+ && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::F32 && input2.data_type() == DataType::F32 && output.data_type() == DataType::F32)
+ && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16 && output.data_type() == DataType::F16),
+ "You called addition with the wrong image formats");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
+ "Wrong shape for output");
+
+ if(is_data_type_fixed_point(input1.data_type()) || is_data_type_fixed_point(output.data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &output);
+ }
+ }
return Status{};
}
-inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
- constexpr unsigned int num_elems_processed_per_iteration = 16;
+ const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
+ const TensorShape &out_shape = broadcast_pair.first;
+ const ValidRegion &valid_region = broadcast_pair.second;
- // Configure kernel window
- Window win = calculate_max_window(*input1, Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ // Auto initialize output if not initialized
+ {
+ set_shape_if_empty(output, out_shape);
+
+ if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16)
+ {
+ set_format_if_unknown(output, Format::S16);
+ }
+ else if(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16)
+ {
+ set_format_if_unknown(output, Format::F16);
+ }
+ else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32)
+ {
+ set_format_if_unknown(output, Format::F32);
+ }
+ }
+
+ Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
+ Window win_input1 = win.broadcast_if_dimension_le_one(input1);
+ Window win_input2 = win.broadcast_if_dimension_le_one(input2);
- bool window_changed = update_window_and_padding(win,
- AccessWindowHorizontal(input1, 0, num_elems_processed_per_iteration),
- AccessWindowHorizontal(input2, 0, num_elems_processed_per_iteration),
- output_access);
+ AccessWindowHorizontal input1_access(&input1, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal input2_access(&input2, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);
- ValidRegion valid_region = intersect_valid_regions(input1->valid_region(),
- input2->valid_region());
+ bool window_changed = update_window_and_padding(win_input1, input1_access)
+ || update_window_and_padding(win_input2, input2_access)
+ || update_window_and_padding(win, output_access);
output_access.set_valid_region(win, valid_region);
@@ -416,26 +452,11 @@ NEArithmeticAdditionKernel::NEArithmeticAdditionKernel()
void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), policy));
- // Auto initialize output if not initialized
- {
- set_shape_if_empty(*output->info(), input1->info()->tensor_shape());
-
- if(input1->info()->data_type() == DataType::S16 || input2->info()->data_type() == DataType::S16)
- {
- set_format_if_unknown(*output->info(), Format::S16);
- }
- else if(input1->info()->data_type() == DataType::F16 || input2->info()->data_type() == DataType::F16)
- {
- set_format_if_unknown(*output->info(), Format::F16);
- }
- else if(input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32)
- {
- set_format_if_unknown(*output->info(), Format::F32);
- }
- }
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(), policy));
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(*input1->info(), *input2->info(), *output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
static std::map<std::string, AddFunction *> map_function =
{
@@ -476,16 +497,15 @@ void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor
_func = it->second;
}
- // Configure kernel window
- auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
INEKernel::configure(win_config.second);
}
Status NEArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, policy));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output, policy));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first);
return Status{};
}
@@ -499,3 +519,10 @@ void NEArithmeticAdditionKernel::run(const Window &window, const ThreadInfo &inf
(*_func)(_input1, _input2, _output, window);
}
+
+BorderSize NEArithmeticAdditionKernel::border_size() const
+{
+ const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
+ const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
+ return BorderSize(0, border, 0, 0);
+}
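A worked instance of the border arithmetic in the new border_size() above, using illustrative sizes (an x-dimension of 1 broadcast against an output of width 24):

    // replicateSize = 24 - min(24, 1) = 23 elements produced by replication
    // border        = min(16 - 1, 23) = 15
    // => BorderSize(0, 15, 0, 0): padding is needed only on the right edge,
    //    capped at one vector's worth (num_elems_processed_per_iteration - 1).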