author     Diego Lopez Recas <Diego.LopezRecas@arm.com>  2017-12-18 14:42:56 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>     2018-11-02 16:45:00 +0000
commit     0021d750d66d199c411df00cdd8308c325f1fef3 (patch)
tree       b96e618977442a8aab335c136d369a958998d416
parent     5b6904b8d9cb5e8a343cde96fd5a8701f44dff90 (diff)
download   ComputeLibrary-0021d750d66d199c411df00cdd8308c325f1fef3.tar.gz
IVGCVSW-863 Broadcast support in CL/NEON Arithmetic Add
Also added instrumentation to support generic tensor broadcasting for the NEON and CL backends.

Change-Id: I1bc5747a286e1a4b464c209067581e103d473b9a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/114201
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
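The broadcast rule this patch implements can be summarised with a minimal standalone sketch (plain C++, not library code): per dimension, two shapes are compatible when they are equal or one of them is 1, and the broadcast dimension is the larger of the two.

#include <algorithm>
#include <array>
#include <cstddef>

// Standalone model of the broadcast rule (illustrative only).
template <std::size_t N>
bool broadcast_shape(const std::array<std::size_t, N> &a,
                     const std::array<std::size_t, N> &b,
                     std::array<std::size_t, N> &out)
{
    for(std::size_t d = 0; d < N; ++d)
    {
        const std::size_t lo = std::min(a[d], b[d]);
        const std::size_t hi = std::max(a[d], b[d]);
        if((lo != 1) && (lo != hi))
        {
            return false; // e.g. 2 vs 3: not broadcast compatible
        }
        out[d] = hi; // broadcast dimension is the max of the two
    }
    return true;
}
// {4, 1, 3} and {4, 5, 3} broadcast to {4, 5, 3}; {2, 2} and {3, 2} do not broadcast.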
-rw-r--r--  arm_compute/core/CL/ICLKernel.h | 124
-rw-r--r--  arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h | 3
-rw-r--r--  arm_compute/core/Dimensions.h | 27
-rw-r--r--  arm_compute/core/Helpers.inl | 14
-rw-r--r--  arm_compute/core/IAccessWindow.h | 2
-rw-r--r--  arm_compute/core/ITensorInfo.h | 46
-rw-r--r--  arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h | 1
-rw-r--r--  arm_compute/core/TensorShape.h | 58
-rw-r--r--  arm_compute/core/Window.h | 57
-rw-r--r--  arm_compute/core/Window.inl | 76
-rw-r--r--  arm_compute/runtime/CL/functions/CLArithmeticAddition.h | 4
-rw-r--r--  arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticAddition.h | 4
-rw-r--r--  arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h | 4
-rw-r--r--  src/core/CL/ICLKernel.cpp | 81
-rw-r--r--  src/core/CL/kernels/CLArithmeticAdditionKernel.cpp | 154
-rw-r--r--  src/core/CL/kernels/CLPermuteKernel.cpp | 14
-rw-r--r--  src/core/IAccessWindow.cpp | 4
-rw-r--r--  src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp | 185
-rw-r--r--  src/core/NEON/kernels/NEConvolutionKernel.cpp | 6
-rw-r--r--  src/core/Validate.cpp | 4
-rw-r--r--  src/runtime/CL/functions/CLArithmeticAddition.cpp | 15
-rw-r--r--  src/runtime/CL/functions/CLLaplacianReconstruct.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticAddition.cpp | 15
-rw-r--r--  src/runtime/NEON/functions/NELaplacianReconstruct.cpp | 4
-rw-r--r--  tests/datasets/ShapeDatasets.h | 52
-rw-r--r--  tests/framework/datasets/ContainerDataset.h | 5
-rw-r--r--  tests/validation/CL/ArithmeticAddition.cpp | 21
-rw-r--r--  tests/validation/NEON/ArithmeticAddition.cpp | 21
-rw-r--r--  tests/validation/fixtures/ArithmeticAdditionFixture.h | 53
-rw-r--r--  tests/validation/reference/ArithmeticAddition.cpp | 65
31 files changed, 754 insertions(+), 373 deletions(-)
diff --git a/arm_compute/core/CL/ICLKernel.h b/arm_compute/core/CL/ICLKernel.h
index a1bc3eb8d2..e660ae55a0 100644
--- a/arm_compute/core/CL/ICLKernel.h
+++ b/arm_compute/core/CL/ICLKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,14 +41,40 @@ class Window;
/** Common interface for all the OpenCL kernels */
class ICLKernel : public IKernel
{
+private:
+ /** Returns the number of arguments enqueued per array object.
+ *
+ * @return The number of arguments enqueued per array object.
+ */
+ template <unsigned int dimension_size>
+ constexpr static unsigned int num_arguments_per_array()
+ {
+ return num_arguments_per_tensor<dimension_size>();
+ }
+ /** Returns the number of arguments enqueued per tensor object.
+ *
+ * @return The number of arguments enqueued per tensor object.
+ */
+ template <unsigned int dimension_size>
+ constexpr static unsigned int num_arguments_per_tensor()
+ {
+ return 2 + 2 * dimension_size;
+ }
+
public:
/** Constructor */
- ICLKernel();
+ ICLKernel()
+ : _kernel(nullptr), _lws_hint(CLKernelLibrary::get().default_ndrange()), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0)
+ {
+ }
/** Returns a reference to the OpenCL kernel of this object.
*
* @return A reference to the OpenCL kernel of this object.
*/
- cl::Kernel &kernel();
+ cl::Kernel &kernel()
+ {
+ return _kernel;
+ }
/** Add the passed 1D array's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the array's arguments. Will be incremented by the number of kernel arguments set.
@@ -58,60 +84,90 @@ public:
* @param[in] window Window the kernel will be executed on.
*/
template <typename T>
- void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window);
+ void add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
+ {
+ add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
+ }
/** Add the passed 1D tensor's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
* @param[in] tensor Tensor to set as an argument of the object's kernel.
* @param[in] window Window the kernel will be executed on.
*/
- void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);
+ void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
+ {
+ add_tensor_argument<1>(idx, tensor, window);
+ }
/** Add the passed 2D tensor's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
* @param[in] tensor Tensor to set as an argument of the object's kernel.
* @param[in] window Window the kernel will be executed on.
*/
- void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);
+ void add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
+ {
+ add_tensor_argument<2>(idx, tensor, window);
+ }
/** Add the passed 3D tensor's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
* @param[in] tensor Tensor to set as an argument of the object's kernel.
* @param[in] window Window the kernel will be executed on.
*/
- void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);
+ void add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
+ {
+ add_tensor_argument<3>(idx, tensor, window);
+ }
/** Add the passed 4D tensor's parameters to the object's kernel's arguments starting from the index idx.
*
* @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
* @param[in] tensor Tensor to set as an argument of the object's kernel.
* @param[in] window Window the kernel will be executed on.
*/
- void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);
+ void add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
+ {
+ add_tensor_argument<4>(idx, tensor, window);
+ }
/** Returns the number of arguments enqueued per 1D array object.
*
* @return The number of arguments enqueued per 1D array object.
*/
- unsigned int num_arguments_per_1D_array() const;
+ constexpr static unsigned int num_arguments_per_1D_array()
+ {
+ return num_arguments_per_array<1>();
+ }
/** Returns the number of arguments enqueued per 1D tensor object.
*
* @return The number of arguments enqueued per 1D tensor object.
*/
- unsigned int num_arguments_per_1D_tensor() const;
+ constexpr static unsigned int num_arguments_per_1D_tensor()
+ {
+ return num_arguments_per_tensor<1>();
+ }
/** Returns the number of arguments enqueued per 2D tensor object.
*
* @return The number of arguments enqueued per 2D tensor object.
*/
- unsigned int num_arguments_per_2D_tensor() const;
+ constexpr static unsigned int num_arguments_per_2D_tensor()
+ {
+ return num_arguments_per_tensor<2>();
+ }
/** Returns the number of arguments enqueued per 3D tensor object.
*
* @return The number of arguments enqueued per 3D tensor object.
*/
- unsigned int num_arguments_per_3D_tensor() const;
+ constexpr static unsigned int num_arguments_per_3D_tensor()
+ {
+ return num_arguments_per_tensor<3>();
+ }
/** Returns the number of arguments enqueued per 4D tensor object.
*
* @return The number of arguments enqueued per 4D tensor object.
*/
- unsigned int num_arguments_per_4D_tensor() const;
+ constexpr static unsigned int num_arguments_per_4D_tensor()
+ {
+ return num_arguments_per_tensor<4>();
+ }
/** Enqueue the OpenCL kernel to process the given window on the passed OpenCL command queue.
*
* @note The queue is *not* flushed by this method, and therefore the kernel will not have been executed by the time this method returns.
@@ -161,7 +217,10 @@ public:
*
* @param[in] target The targeted GPU architecture
*/
- void set_target(GPUTarget target);
+ void set_target(GPUTarget target)
+ {
+ _target = target;
+ }
/** Set the targeted GPU architecture according to the CL device
*
@@ -173,7 +232,10 @@ public:
*
* @return The targeted GPU architecture.
*/
- GPUTarget get_target() const;
+ GPUTarget get_target() const
+ {
+ return _target;
+ }
/** Get the maximum workgroup size for the device the CLKernelLibrary uses.
*
@@ -207,18 +269,6 @@ private:
*/
template <unsigned int dimension_size>
void add_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window);
- /** Returns the number of arguments enqueued per array object.
- *
- * @return The number of arguments enqueued per array object.
- */
- template <unsigned int dimension_size>
- unsigned int num_arguments_per_array() const;
- /** Returns the number of arguments enqueued per tensor object.
- *
- * @return The number of arguments enqueued per tensor object.
- */
- template <unsigned int dimension_size>
- unsigned int num_arguments_per_tensor() const;
protected:
cl::Kernel _kernel; /**< OpenCL kernel to run */
@@ -246,6 +296,8 @@ void enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Window &window, c
template <typename T, unsigned int dimension_size>
void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
{
+ ARM_COMPUTE_ERROR_ON(array == nullptr);
+
// Calculate offset to the start of the window
unsigned int offset_first_element = 0;
@@ -269,23 +321,5 @@ void ICLKernel::add_array_argument(unsigned &idx, const ICLArray<T> *array, cons
"add_%dD_array_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_array<dimension_size>());
ARM_COMPUTE_UNUSED(idx_start);
}
-
-template <typename T>
-void ICLKernel::add_1D_array_argument(unsigned int &idx, const ICLArray<T> *array, const Strides &strides, unsigned int num_dimensions, const Window &window)
-{
- add_array_argument<T, 1>(idx, array, strides, num_dimensions, window);
-}
-
-template <unsigned int dimension_size>
-unsigned int ICLKernel::num_arguments_per_array() const
-{
- return num_arguments_per_tensor<dimension_size>();
-}
-
-template <unsigned int dimension_size>
-unsigned int ICLKernel::num_arguments_per_tensor() const
-{
- return 2 + 2 * dimension_size;
-}
}
#endif /*__ARM_COMPUTE_ICLKERNEL_H__ */
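For reference, the 2 + 2 * dimension_size count returned by num_arguments_per_tensor() matches what add_tensor_argument() enqueues: the buffer, one (stride, stride * step) pair per dimension, and the offset of the first element. A compile-time check of that arithmetic (a sketch, not library code):

constexpr unsigned int args_for(unsigned int dims)
{
    return 1          // cl_buffer
           + 2 * dims // stride_in_bytes and stride * window step, per dimension
           + 1;       // offset_first_element_in_bytes
}
static_assert(args_for(3) == 8, "a 3D tensor enqueues 8 kernel arguments");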
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
index 96b8dc8d48..5112476aae 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,6 +72,7 @@ public:
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
+ BorderSize border_size() const override;
private:
const ICLTensor *_input1; /**< Source tensor 1 */
diff --git a/arm_compute/core/Dimensions.h b/arm_compute/core/Dimensions.h
index ae8d6c3503..58ffd7ff3c 100644
--- a/arm_compute/core/Dimensions.h
+++ b/arm_compute/core/Dimensions.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -135,23 +135,24 @@ public:
* @param[in] n Number of dimensions to collapse into @p first.
* @param[in] first Dimensions into which the following @p n are collapsed.
*/
- void collapse(size_t n, size_t first = 0)
+ void collapse(const size_t n, const size_t first = 0)
{
ARM_COMPUTE_ERROR_ON(first + n > _id.size());
- if(n == 0)
+ const size_t last = std::min(_num_dimensions, first + n);
+
+ if(last > (first + 1))
{
- return;
+ // Collapse dimensions into the first
+ _id[first] = std::accumulate(&_id[first], &_id[last], 1, std::multiplies<T>());
+ // Shift the remaining dimensions down
+ std::copy(&_id[last], &_id[_num_dimensions], &_id[first + 1]);
+ // Reduce the number of dimensions
+ const size_t old_num_dimensions = _num_dimensions;
+ _num_dimensions -= last - first - 1;
+ // Fill the now empty dimensions with zero
+ std::fill(&_id[_num_dimensions], &_id[old_num_dimensions], 0);
}
-
- // Collapse dimensions into the first
- _id[first] = std::accumulate(_id.cbegin() + first, _id.cbegin() + first + n, 1, std::multiplies<T>());
- // Shift the remaining dimensions down
- std::copy(_id.begin() + first + n, _id.end(), _id.begin() + first + 1);
- // Reduce the number of dimensions
- _num_dimensions -= std::min(n, _num_dimensions) - 1;
- // Fill the now empty dimensions with zero
- std::fill(_id.begin() + _num_dimensions, _id.end(), 0);
}
/** Collapse dimensions starting from a given point
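The reworked collapse() above folds dimensions [first, first + n) into first by multiplication, shifts the tail down, and zero-fills the freed slots. A minimal model of that behaviour (illustrative only; it uses a std::vector in place of the internal dimension array):

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<std::size_t> collapse(std::vector<std::size_t> id, std::size_t n, std::size_t first)
{
    const std::size_t last = std::min(id.size(), first + n);
    if(last > first + 1)
    {
        for(std::size_t d = first + 1; d < last; ++d)
        {
            id[first] *= id[d]; // fold [first, last) into first
        }
        id.erase(id.begin() + first + 1, id.begin() + last); // shift the tail down
        id.resize(id.size() + (last - first - 1), 0);        // zero-fill the freed slots
    }
    return id;
}
// collapse({2, 3, 4, 5}, /*n=*/2, /*first=*/1) returns {2, 12, 5, 0}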
diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index 6d0f8b0104..8b86c22676 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -123,6 +123,11 @@ inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... i
{
w.validate();
+ for(unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
+ {
+ ARM_COMPUTE_ERROR_ON(w[i].step() == 0);
+ }
+
Coordinates id;
ForEachDimension<Coordinates::num_max_dimensions>::unroll(w, id, std::forward<L>(lambda_function), std::forward<Ts>(iterators)...);
}
@@ -136,9 +141,10 @@ inline Iterator::Iterator(const ITensor *tensor, const Window &win)
: Iterator()
{
ARM_COMPUTE_ERROR_ON(tensor == nullptr);
- const ITensorInfo *info = tensor->info();
- ARM_COMPUTE_ERROR_ON(info == nullptr);
- const Strides &strides = info->strides_in_bytes();
+ ARM_COMPUTE_ERROR_ON(tensor->info() == nullptr);
+
+ const ITensorInfo *info = tensor->info();
+ const Strides &strides = info->strides_in_bytes();
_ptr = tensor->buffer() + info->offset_first_element_in_bytes();
diff --git a/arm_compute/core/IAccessWindow.h b/arm_compute/core/IAccessWindow.h
index 583041a48b..4bbcbb3a40 100644
--- a/arm_compute/core/IAccessWindow.h
+++ b/arm_compute/core/IAccessWindow.h
@@ -139,8 +139,8 @@ public:
}
AccessWindowRectangle(const AccessWindowRectangle &) = delete;
+ AccessWindowRectangle(AccessWindowRectangle &&) = delete;
AccessWindowRectangle &operator=(const AccessWindowRectangle &) = delete;
- AccessWindowRectangle(AccessWindowRectangle &&) = default;
AccessWindowRectangle &operator=(AccessWindowRectangle &&) = default;
~AccessWindowRectangle() = default;
diff --git a/arm_compute/core/ITensorInfo.h b/arm_compute/core/ITensorInfo.h
index 9112f3ea18..b5677dffd6 100644
--- a/arm_compute/core/ITensorInfo.h
+++ b/arm_compute/core/ITensorInfo.h
@@ -30,6 +30,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ICloneable.h"
+#include "arm_compute/core/utils/misc/utility.h"
#include <cstddef>
@@ -221,6 +222,51 @@ public:
* @return A QuantizationInfo containing the scale and offset.
*/
virtual QuantizationInfo quantization_info() const = 0;
+
+ /** If infos are broadcast compatible tensor info's, return the broadcasted shape and the intersection of
+ * the broadcasted valid regions of the tensors.
+ *
+ * Two tensor info's are broadcast compatible if their shapes are broadcast compatible.
+ *
+ * Two tensor shapes are broadcast compatible if for each dimension, they're equal or one of them is 1.
+ *
+ * If two shapes are compatible, each dimension in the broadcasted shape is the max of the original dimensions.
+ *
+ * @param[in] infos Tensor info's.
+ *
+ * @return The broadcasted shape and valid region, or an empty shape and valid region if the infos are
+ * not broadcast compatible.
+ */
+ template <typename... Infos>
+ static std::pair<TensorShape, ValidRegion> broadcast_shape_and_valid_region(const Infos &... infos)
+ {
+ TensorShape bc_shape = TensorShape::broadcast_shape(infos.tensor_shape()...);
+ ValidRegion bc_valid_region{ Coordinates(), bc_shape };
+
+ auto broadcast_valid_region = [&bc_valid_region](const ITensorInfo & info)
+ {
+ if(info.num_dimensions() != 0)
+ {
+ for(size_t d = 0; d < bc_valid_region.shape.num_dimensions(); ++d)
+ {
+ const bool is_broadcast = (info.tensor_shape()[d] == 1);
+
+ const int anchor_max = std::max(bc_valid_region.anchor[d], info.valid_region().anchor[d]);
+ const size_t valid_min = std::min(bc_valid_region.shape[d], info.valid_region().shape[d]);
+
+ if(!is_broadcast || (valid_min == 0))
+ {
+ bc_valid_region.anchor.set(d, anchor_max);
+ bc_valid_region.shape.set(d, valid_min);
+ }
+ }
+ }
+ };
+
+ utility::for_each(broadcast_valid_region, infos...);
+
+ return std::pair<TensorShape, ValidRegion>(bc_shape, bc_valid_region);
+ }
};
}
#endif /*__ARM_COMPUTE_TENSORINFO_H__ */
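A hedged usage sketch of the new helper, in the shape the kernels below consume it (assumes the arm_compute headers; the tensor names input1/input2 are hypothetical):

const std::pair<TensorShape, ValidRegion> broadcast_pair =
    ITensorInfo::broadcast_shape_and_valid_region(*input1->info(), *input2->info());
const TensorShape &out_shape    = broadcast_pair.first;
const ValidRegion &valid_region = broadcast_pair.second;
// An empty out_shape (total_size() == 0) signals that the infos were not broadcast compatible.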
diff --git a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
index eedecfb524..155e792f5d 100644
--- a/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
+++ b/arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h
@@ -85,6 +85,7 @@ public:
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
+ BorderSize border_size() const override;
private:
/** Common signature for all the specialised add functions
diff --git a/arm_compute/core/TensorShape.h b/arm_compute/core/TensorShape.h
index 50f1211c18..dc836c98da 100644
--- a/arm_compute/core/TensorShape.h
+++ b/arm_compute/core/TensorShape.h
@@ -26,6 +26,7 @@
#include "arm_compute/core/Dimensions.h"
#include "arm_compute/core/Error.h"
+#include "arm_compute/core/utils/misc/utility.h"
#include <algorithm>
#include <array>
@@ -132,6 +133,19 @@ public:
std::fill(_id.begin() + _num_dimensions, _id.end(), 1);
}
+ /** Return a copy with collapsed dimensions starting from a given point.
+ *
+ * @param[in] start Starting point of collapsing dimensions.
+ *
+ * @return A copy with collapsed dimensions starting from start.
+ */
+ TensorShape collapsed_from(size_t start) const
+ {
+ TensorShape copy(*this);
+ copy.collapse(num_dimensions(), start);
+ return copy;
+ }
+
/** Collapses all dimensions to a single linear total size.
*
* @return The total tensor size in terms of elements.
@@ -164,6 +178,50 @@ public:
return std::accumulate(_id.begin(), _id.begin() + dimension, 1, std::multiplies<size_t>());
}
+ /** If shapes are broadcast compatible, return the broadcasted shape.
+ *
+ * Two tensor shapes are broadcast compatible if for each dimension, they're equal or one of them is 1.
+ *
+ * If two shapes are compatible, each dimension in the broadcasted shape is the max of the original dimensions.
+ *
+ * @param[in] shapes Tensor shapes.
+ *
+ * @return The broadcasted shape or an empty shape if the shapes are not broadcast compatible.
+ */
+ template <typename... Shapes>
+ static TensorShape broadcast_shape(const Shapes &... shapes)
+ {
+ TensorShape bc_shape;
+
+ auto broadcast = [&bc_shape](const TensorShape & other)
+ {
+ if(bc_shape.num_dimensions() == 0)
+ {
+ bc_shape = other;
+ }
+ else if(other.num_dimensions() != 0)
+ {
+ for(size_t d = 0; d < TensorShape::num_max_dimensions; ++d)
+ {
+ const size_t dim_min = std::min(bc_shape[d], other[d]);
+ const size_t dim_max = std::max(bc_shape[d], other[d]);
+
+ if((dim_min != 1) && (dim_min != dim_max))
+ {
+ bc_shape = TensorShape{ 0U };
+ break;
+ }
+
+ bc_shape.set(d, dim_max);
+ }
+ }
+ };
+
+ utility::for_each(broadcast, shapes...);
+
+ return bc_shape;
+ }
+
private:
/** Remove trailing dimensions of size 1 from the reported number of dimensions. */
void apply_dimension_correction()
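A short usage sketch of TensorShape::broadcast_shape() (assumes arm_compute/core/TensorShape.h; values are illustrative):

TensorShape a{ 3U, 1U, 2U };
TensorShape b{ 3U, 4U, 2U };
TensorShape bc = TensorShape::broadcast_shape(a, b);   // bc is { 3, 4, 2 }

TensorShape c{ 5U, 4U, 2U };
TensorShape none = TensorShape::broadcast_shape(a, c); // empty shape: total_size() == 0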
diff --git a/arm_compute/core/Window.h b/arm_compute/core/Window.h
index c890bf8f9e..cca12c9efe 100644
--- a/arm_compute/core/Window.h
+++ b/arm_compute/core/Window.h
@@ -104,6 +104,14 @@ public:
{
_step = step;
}
+ /** Set the dimension's end
+ *
+ * @param[in] end The new end
+ */
+ void set_end(int end)
+ {
+ _end = end;
+ }
private:
int _start; /**< Start of the dimension */
@@ -302,27 +310,64 @@ public:
return slide_window_slice<4>(slice);
}
+ /* Collapse the dimensions between @p first and @p last if possible.
+ *
+ * A dimension is collapsable if it starts from 0 and matches the corresponding dimension in the full_window
+ *
+ * @param[in] full_window Full window @p window has been created from.
+ * @param[in] first Start dimension into which the following are collapsed.
+ * @param[in] last End (exclusive) dimension to collapse.
+ * @param[out] has_collapsed (Optional) Whether the window was collapsed.
+ *
+ * @return Collapsed window.
+ */
+ Window collapse_if_possible(const Window &full_window, size_t first, size_t last, bool *has_collapsed = nullptr) const;
+
/* Collapse the dimensions higher than @p first if possible.
*
* A dimension is collapsable if it starts from 0 and matches the corresponding dimension in the full_window
*
- * @param[in] full_window Full window @p window has been created from.
- * @param[in] first Dimensions into which the following are collapsed.
+ * @param[in] full_window Full window @p window has been created from.
+ * @param[in] first Start dimension into which the following are collapsed.
+ * @param[out] has_collapsed (Optional) Whether the window was collapsed.
*
* @return Collapsed window.
*/
- Window collapse_if_possible(const Window &full_window, size_t first) const;
+ Window collapse_if_possible(const Window &full_window, size_t first, bool *has_collapsed = nullptr) const
+ {
+ return collapse_if_possible(full_window, first, Coordinates::num_max_dimensions, has_collapsed);
+ }
- /* Collapse the dimensions higher than @p first.
+ /* Collapse the dimensions between @p first and @p last.
*
* A dimension is collapsable if it starts from 0 and matches the corresponding dimension in the full_window
*
* @param[in] full_window Full window @p window has been created from.
- * @param[in] first Dimensions into which the following are collapsed.
+ * @param[in] first Start dimension into which the following are collapsed.
+ * @param[in] last End (exclusive) dimension to collapse.
*
* @return Collapsed window if successful.
*/
- Window collapse(const Window &full_window, size_t first) const;
+ Window collapse(const Window &full_window, size_t first, size_t last = Coordinates::num_max_dimensions) const;
+
+ /* Don't advance in the dimension where @p shape is less than or equal to 1.
+ *
+ * @param[in] shape A TensorShape.
+ *
+ * @return Broadcast window.
+ */
+ Window broadcast_if_dimension_le_one(const TensorShape &shape) const;
+
+ /* Don't advance in the dimension where the shape of @p info is less than or equal to 1.
+ *
+ * @param[in] info An ITensorInfo.
+ *
+ * @return Broadcast window.
+ */
+ Window broadcast_if_dimension_le_one(const ITensorInfo &info) const
+ {
+ return broadcast_if_dimension_le_one(info.tensor_shape());
+ }
private:
/** First slice of the window
diff --git a/arm_compute/core/Window.inl b/arm_compute/core/Window.inl
index 1b21820f90..23b2a8e322 100644
--- a/arm_compute/core/Window.inl
+++ b/arm_compute/core/Window.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,55 +37,66 @@ inline constexpr const Window::Dimension &Window::operator[](size_t dimension) c
// Precondition: dimension < Coordinates::num_max_dimensions
return _dims.at(dimension);
}
+
inline void Window::set(size_t dimension, const Window::Dimension &dim)
{
ARM_COMPUTE_ERROR_ON(dimension >= Coordinates::num_max_dimensions);
_dims[dimension] = dim;
}
-inline Window Window::collapse_if_possible(const Window &full_window, size_t first) const
+inline Window Window::collapse_if_possible(const Window &full_window, const size_t first,
+ const size_t last, bool *has_collapsed) const
{
- bool is_collapsable = false;
- Window collapsed;
- for(size_t d = 0; d < Coordinates::num_max_dimensions; ++d)
+ Window collapsed(*this);
+
+ bool is_collapsable = true;
+ int collapsed_end = _dims[first].end();
+
+ for(size_t d = first + 1; is_collapsable && (d < last); ++d)
{
- if(is_collapsable)
- {
- collapsed.set(first, Window::Dimension(collapsed[first].end() * _dims[d].start(), collapsed[first].end() * _dims[d].end()));
- }
- else
- {
- collapsed.set(d, _dims[d]);
- }
+ // The _dims's dimension must match the full _dims dimension to be collapsable:
+ is_collapsable = (_dims[d].start() == 0) && (full_window[d].start() == 0) && (_dims[d].step() <= 1)
+ && (full_window[d].end() == _dims[d].end());
+ collapsed_end *= _dims[d].end();
+ }
- if(is_collapsable || d == first) // Try to start collapsing from this dimension
- {
- // The _dims's dimension must match the full _dims dimension to be collapsable:
- is_collapsable = _dims[d].start() == 0 && _dims[d].start() == full_window[d].start()
- && full_window[d].end() == _dims[d].end();
- }
- else
+ if(is_collapsable)
+ {
+ collapsed._dims.at(first).set_end(collapsed_end);
+ for(size_t d = first + 1; is_collapsable && (d < last); ++d)
{
- is_collapsable = false;
+ collapsed.set(d, Dimension());
}
}
+
+ if(has_collapsed != nullptr)
+ {
+ *has_collapsed = is_collapsable;
+ }
+
return collapsed;
}
-inline Window Window::collapse(const Window &full_window, size_t first) const
+inline Window Window::collapse(const Window &full_window, const size_t first, const size_t last) const
{
- Window collapsed = collapse_if_possible(full_window, first);
+ bool has_collapsed = false;
+ Window collapsed = collapse_if_possible(full_window, first, last, &has_collapsed);
// Make sure that the window has collapsed
- int end = _dims[first].end();
- int start = 0;
- ARM_COMPUTE_UNUSED(start);
- for(size_t d = first + 1; d < Coordinates::num_max_dimensions; ++d)
+ ARM_COMPUTE_ERROR_ON(!has_collapsed);
+ return collapsed;
+}
+
+inline Window Window::broadcast_if_dimension_le_one(const TensorShape &shape) const
+{
+ Window broadcastWin(*this);
+ for(size_t d = 0; d < TensorShape::num_max_dimensions; ++d)
{
- start = end * _dims[d].start();
- end *= _dims[d].end();
+ if(shape[d] <= 1)
+ {
+ broadcastWin.set(d, Dimension(0, 0, 0));
+ }
}
- ARM_COMPUTE_ERROR_ON((collapsed[first].end() != end) || (collapsed[first].start() != start));
- return collapsed;
+ return broadcastWin;
}
inline void Window::shift(size_t dimension, int shift_value)
@@ -129,9 +140,8 @@ inline void Window::validate() const
{
for(size_t i = 0; i < Coordinates::num_max_dimensions; ++i)
{
- ARM_COMPUTE_ERROR_ON(_dims[i].step() == 0);
ARM_COMPUTE_ERROR_ON(_dims[i].end() < _dims[i].start());
- ARM_COMPUTE_ERROR_ON((_dims[i].end() - _dims[i].start()) % _dims[i].step());
+ ARM_COMPUTE_ERROR_ON((_dims[i].step() != 0) && (((_dims[i].end() - _dims[i].start()) % _dims[i].step()) != 0));
}
}
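The broadcast window introduced above pins a dimension to Dimension(0, 0, 0) wherever the shape is <= 1, so iterators never advance along that axis and the single input element is re-read for every output coordinate. A hedged sketch (the win and input names are hypothetical):

Window win_in = win.broadcast_if_dimension_le_one(input->info()->tensor_shape());
// For an input of shape (W, 1, Z), win_in[Window::DimY] is Dimension(0, 0, 0):
// start 0, end 0, step 0, i.e. a fixed y coordinate for the whole execution.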
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
index 1ef3e274c7..921738d0c2 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,7 +46,7 @@ public:
* @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
* @param[in] policy Policy to use to handle overflow.
*/
- void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+ void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAddition
*
* @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
diff --git a/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h b/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
index 4a676c85a0..6905b03652 100644
--- a/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
+++ b/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,7 +76,7 @@ public:
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*
*/
- void configure(const CLPyramid *pyramid, const ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value);
+ void configure(const CLPyramid *pyramid, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value);
// Inherited methods overridden:
void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index 3718073937..c72d0b6d61 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ public:
* @param[out] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
* @param[in] policy Policy to use to handle overflow.
*/
- void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
+ void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAddition
*
* @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
diff --git a/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h b/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h
index 3d423607a3..2143042bd3 100644
--- a/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h
+++ b/arm_compute/runtime/NEON/functions/NELaplacianReconstruct.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,7 +76,7 @@ public:
* @param[in] constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
*
*/
- void configure(const IPyramid *pyramid, const ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value);
+ void configure(const IPyramid *pyramid, ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value);
// Inherited methods overridden:
void run() override;
diff --git a/src/core/CL/ICLKernel.cpp b/src/core/CL/ICLKernel.cpp
index 7da74381d3..491e0c4b91 100644
--- a/src/core/CL/ICLKernel.cpp
+++ b/src/core/CL/ICLKernel.cpp
@@ -43,10 +43,11 @@ void arm_compute::enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Wind
return;
}
- // Make sure that dimensions > Z are 1
- for(unsigned int i = 3; i < Coordinates::num_max_dimensions; ++i)
+ for(unsigned int i = 0; i < Coordinates::num_max_dimensions; ++i)
{
- ARM_COMPUTE_ERROR_ON((window[i].end() - window[i].start()) != 1);
+ ARM_COMPUTE_ERROR_ON(window[i].step() == 0);
+ // Make sure that dimensions > Z are 1
+ ARM_COMPUTE_ERROR_ON((i >= 3) && ((window[i].end() - window[i].start()) != 1));
}
cl::NDRange gws = ICLKernel::gws_from_window(window);
@@ -77,16 +78,6 @@ void arm_compute::enqueue(cl::CommandQueue &queue, ICLKernel &kernel, const Wind
queue.enqueueNDRangeKernel(kernel.kernel(), cl::NullRange, gws, lws);
}
-ICLKernel::ICLKernel()
- : _kernel(nullptr), _lws_hint(CLKernelLibrary::get().default_ndrange()), _target(GPUTarget::MIDGARD), _config_id(arm_compute::default_config_id), _max_workgroup_size(0)
-{
-}
-
-cl::Kernel &ICLKernel::kernel()
-{
- return _kernel;
-}
-
template <unsigned int dimension_size>
void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, const Window &window)
{
@@ -106,10 +97,10 @@ void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, cons
unsigned int idx_start = idx;
_kernel.setArg(idx++, tensor->cl_buffer());
- for(unsigned int dimension = 0; dimension < dimension_size; dimension++)
+ for(unsigned int d = 0; d < dimension_size; ++d)
{
- _kernel.setArg<cl_uint>(idx++, strides[dimension]);
- _kernel.setArg<cl_uint>(idx++, strides[dimension] * window[dimension].step());
+ _kernel.setArg<cl_uint>(idx++, strides[d]);
+ _kernel.setArg<cl_uint>(idx++, strides[d] * window[d].step());
}
_kernel.setArg<cl_uint>(idx++, offset_first_element);
@@ -119,66 +110,16 @@ void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, cons
ARM_COMPUTE_UNUSED(idx_start);
}
-void ICLKernel::add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
-{
- add_tensor_argument<1>(idx, tensor, window);
-}
-
-void ICLKernel::add_2D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
-{
- add_tensor_argument<2>(idx, tensor, window);
-}
-
-void ICLKernel::add_3D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
-{
- add_tensor_argument<3>(idx, tensor, window);
-}
-
-void ICLKernel::add_4D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)
-{
- add_tensor_argument<4>(idx, tensor, window);
-}
-
-unsigned int ICLKernel::num_arguments_per_1D_array() const
-{
- return num_arguments_per_array<1>();
-}
-
-unsigned int ICLKernel::num_arguments_per_1D_tensor() const
-{
- return num_arguments_per_tensor<1>();
-}
-
-unsigned int ICLKernel::num_arguments_per_2D_tensor() const
-{
- return num_arguments_per_tensor<2>();
-}
-
-unsigned int ICLKernel::num_arguments_per_3D_tensor() const
-{
- return num_arguments_per_tensor<3>();
-}
-
-unsigned int ICLKernel::num_arguments_per_4D_tensor() const
-{
- return num_arguments_per_tensor<4>();
-}
+template void ICLKernel::add_tensor_argument<1>(unsigned &idx, const ICLTensor *tensor, const Window &window);
+template void ICLKernel::add_tensor_argument<2>(unsigned &idx, const ICLTensor *tensor, const Window &window);
+template void ICLKernel::add_tensor_argument<3>(unsigned &idx, const ICLTensor *tensor, const Window &window);
+template void ICLKernel::add_tensor_argument<4>(unsigned &idx, const ICLTensor *tensor, const Window &window);
void ICLKernel::set_target(cl::Device &device)
{
_target = get_target_from_device(device);
}
-void ICLKernel::set_target(GPUTarget target)
-{
- _target = target;
-}
-
-GPUTarget ICLKernel::get_target() const
-{
- return _target;
-}
-
size_t ICLKernel::get_max_workgroup_size()
{
if(_max_workgroup_size == 0)
diff --git a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
index 75701ee011..c4904ecbe1 100644
--- a/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
+++ b/src/core/CL/kernels/CLArithmeticAdditionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,60 +24,75 @@
#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-
-#include <cstddef>
-#include <set>
-#include <string>
using namespace arm_compute;
namespace
{
-Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+constexpr unsigned int num_elems_processed_per_iteration = 16;
+
+Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ConvertPolicy policy)
{
ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+
+ const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &input2);
// Validate in case of configured output
- if((output != nullptr) && (output->total_size() != 0))
+ if(output.total_size() > 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->data_type() == DataType::U8 && (input1->data_type() != DataType::U8 || input2->data_type() != DataType::U8),
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((output.data_type() == DataType::U8) && ((input1.data_type() != DataType::U8) || (input2.data_type() != DataType::U8)),
"Output can only be U8 if both inputs are U8");
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, output);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
+ "Wrong shape for output");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &output);
}
return Status{};
}
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
- constexpr unsigned int num_elems_processed_per_iteration = 16;
+ const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
+ const TensorShape &out_shape = broadcast_pair.first;
+ const ValidRegion &valid_region = broadcast_pair.second;
- Window win = calculate_max_window(*input1, Steps(num_elems_processed_per_iteration));
+ // Auto initialize output if not initialized
+ {
+ set_shape_if_empty(output, out_shape);
- AccessWindowHorizontal input1_access(input1, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal input2_access(input2, 0, num_elems_processed_per_iteration);
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16)
+ {
+ set_format_if_unknown(output, Format::S16);
+ }
+ else if(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16)
+ {
+ set_format_if_unknown(output, Format::F16);
+ }
+ else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32)
+ {
+ set_format_if_unknown(output, Format::F32);
+ }
+ }
+
+ Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
+ Window win_input1 = win.broadcast_if_dimension_le_one(input1);
+ Window win_input2 = win.broadcast_if_dimension_le_one(input2);
- bool window_changed = update_window_and_padding(win, input1_access, input2_access, output_access);
+ AccessWindowHorizontal input1_access(&input1, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal input2_access(&input2, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);
- ValidRegion valid_region = intersect_valid_regions(input1->valid_region(),
- input2->valid_region());
+ bool window_changed = update_window_and_padding(win_input1, input1_access)
+ || update_window_and_padding(win_input2, input2_access)
+ || update_window_and_padding(win, output_access);
output_access.set_valid_region(win, valid_region);
@@ -94,26 +109,11 @@ CLArithmeticAdditionKernel::CLArithmeticAdditionKernel()
void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), policy));
- // Auto initialize output if not initialized
- {
- set_shape_if_empty(*output->info(), input1->info()->tensor_shape());
-
- if(input1->info()->data_type() == DataType::S16 || input2->info()->data_type() == DataType::S16)
- {
- set_format_if_unknown(*output->info(), Format::S16);
- }
- else if(input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32)
- {
- set_format_if_unknown(*output->info(), Format::F32);
- }
- else if(input1->info()->data_type() == DataType::F16 && input2->info()->data_type() == DataType::F16)
- {
- set_format_if_unknown(*output->info(), Format::F16);
- }
- }
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(), policy));
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(*input1->info(), *input2->info(), *output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
_input1 = input1;
_input2 = input2;
@@ -135,16 +135,15 @@ void CLArithmeticAdditionKernel::configure(const ICLTensor *input1, const ICLTen
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("arithmetic_add", build_opts));
- // Configure kernel window
- auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
ICLKernel::configure(win_config.second);
}
Status CLArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, policy));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output, policy));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first);
return Status{};
}
@@ -154,16 +153,49 @@ void CLArithmeticAdditionKernel::run(const Window &window, cl::CommandQueue &que
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
- Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
- Window slice = collapsed.first_slice_window_3D();
+ const TensorShape &in_shape1 = _input1->info()->tensor_shape();
+ const TensorShape &in_shape2 = _input2->info()->tensor_shape();
+ const TensorShape &out_shape = _output->info()->tensor_shape();
+
+ bool can_collapse = true;
+ if(std::min(in_shape1.total_size(), in_shape2.total_size()) > 1)
+ {
+ can_collapse = (std::min(in_shape1.num_dimensions(), in_shape2.num_dimensions()) > Window::DimZ);
+ for(size_t d = Window::DimZ; can_collapse && (d < out_shape.num_dimensions()); d++)
+ {
+ can_collapse = (in_shape1[d] == in_shape2[d]);
+ }
+ }
+
+ bool has_collapsed = false;
+ Window collapsed = can_collapse ? window.collapse_if_possible(ICLKernel::window(), Window::DimZ, &has_collapsed) : window;
+
+ const TensorShape &in_shape1_collapsed = has_collapsed ? in_shape1.collapsed_from(Window::DimZ) : in_shape1;
+ const TensorShape &in_shape2_collapsed = has_collapsed ? in_shape2.collapsed_from(Window::DimZ) : in_shape2;
+
+ Window slice = collapsed.first_slice_window_3D();
+ Window slice_input1 = slice.broadcast_if_dimension_le_one(in_shape1_collapsed);
+ Window slice_input2 = slice.broadcast_if_dimension_le_one(in_shape2_collapsed);
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input1, slice);
- add_3D_tensor_argument(idx, _input2, slice);
+
+ add_3D_tensor_argument(idx, _input1, slice_input1);
+ add_3D_tensor_argument(idx, _input2, slice_input2);
add_3D_tensor_argument(idx, _output, slice);
+
enqueue(queue, *this, slice);
+
+ collapsed.slide_window_slice_3D(slice_input1);
+ collapsed.slide_window_slice_3D(slice_input2);
}
while(collapsed.slide_window_slice_3D(slice));
}
+
+BorderSize CLArithmeticAdditionKernel::border_size() const
+{
+ const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
+ const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
+ return BorderSize(0, border, 0, 0);
+}
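A worked example of the border_size() arithmetic above, with hypothetical sizes: padding is only ever needed on the right, where the replicated broadcast input falls short of the output.

// replicateSize = output.dimension(0) - min(input1.dimension(0), input2.dimension(0))
//               = 24 - min(24, 1) = 23           (second input is the broadcast one)
// border        = min(num_elems_processed_per_iteration - 1, replicateSize)
//               = min(15, 23) = 15
// => BorderSize(0 /*top*/, 15 /*right*/, 0 /*bottom*/, 0 /*left*/)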
diff --git a/src/core/CL/kernels/CLPermuteKernel.cpp b/src/core/CL/kernels/CLPermuteKernel.cpp
index 132de60b68..1f36445732 100644
--- a/src/core/CL/kernels/CLPermuteKernel.cpp
+++ b/src/core/CL/kernels/CLPermuteKernel.cpp
@@ -106,10 +106,10 @@ void CLPermuteKernel::run(const Window &window, cl::CommandQueue &queue)
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(ICLKernel::window(), window);
- Window slice_in = window.first_slice_window_4D();
- Window slice_out(slice_in);
+ Window slice_in = window.first_slice_window_4D().collapse(ICLKernel::window(), 2, 4);
// Setup output slice
+ Window slice_out(slice_in);
slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
@@ -117,12 +117,10 @@ void CLPermuteKernel::run(const Window &window, cl::CommandQueue &queue)
do
{
- auto collapsed_slice_in = slice_in.collapse(ICLKernel::window(), 2);
- auto collapsed_slice_out = slice_out.collapse(ICLKernel::window(), 2);
- unsigned int idx = 0;
- add_4D_tensor_argument(idx, _input, collapsed_slice_in);
- add_4D_tensor_argument(idx, _output, collapsed_slice_out);
- enqueue(queue, *this, collapsed_slice_in);
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, slice_in);
+ add_4D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice_in);
}
while(window.slide_window_slice_4D(slice_in) && window.slide_window_slice_4D(slice_out));
}
diff --git a/src/core/IAccessWindow.cpp b/src/core/IAccessWindow.cpp
index 7dfe5db5c5..c73f4e7bb2 100644
--- a/src/core/IAccessWindow.cpp
+++ b/src/core/IAccessWindow.cpp
@@ -207,8 +207,8 @@ bool AccessWindowRectangle::update_padding_if_needed(const Window &window)
return false;
}
- ARM_COMPUTE_ERROR_ON(window.x().step() * _scale_x == 0);
- ARM_COMPUTE_ERROR_ON(window.y().step() * _scale_y == 0);
+ ARM_COMPUTE_ERROR_ON(_scale_x == 0);
+ ARM_COMPUTE_ERROR_ON(_scale_y == 0);
const int min_x = window.x().start() * _scale_x + _x;
const int max_x = (window.x().end() - window.x().step()) * _scale_x + _x + _width;
diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
index 8a98cf7cbc..a487090a98 100644
--- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,10 +46,12 @@ class Coordinates;
namespace
{
+constexpr unsigned int num_elems_processed_per_iteration = 16;
+
void add_wrap_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -64,8 +66,8 @@ void add_wrap_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out,
void add_saturate_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -80,8 +82,8 @@ void add_saturate_QS8_QS8_QS8(const ITensor *in1, const ITensor *in2, ITensor *o
void add_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -93,8 +95,8 @@ void add_wrap_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, con
void add_saturate_U8_U8_U8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -163,8 +165,8 @@ inline float16x8x2_t vadd2q_f16(const float16x8x2_t &a, const float16x8x2_t &b)
void add_F16_F16_F16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -186,8 +188,8 @@ void add_F16_F16_F16(const ITensor *in1, const ITensor *in2, ITensor *out, const
void add_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -202,8 +204,8 @@ void add_F32_F32_F32(const ITensor *in1, const ITensor *in2, ITensor *out, const
void add_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -218,8 +220,8 @@ void add_wrap_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out,
void add_saturate_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -234,8 +236,8 @@ void add_saturate_S16_S16_S16(const ITensor *in1, const ITensor *in2, ITensor *o
void add_wrap_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -257,8 +259,8 @@ void add_wrap_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, c
void add_saturate_S16_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -292,8 +294,8 @@ inline void add_saturate_U8_S16_S16(const ITensor *input1, const ITensor *input2
void add_wrap_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -325,8 +327,8 @@ void add_wrap_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, co
void add_saturate_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
- Iterator input1(in1, window);
- Iterator input2(in2, window);
+ Iterator input1(in1, window.broadcast_if_dimension_le_one(in1->info()->tensor_shape()));
+ Iterator input2(in2, window.broadcast_if_dimension_le_one(in2->info()->tensor_shape()));
Iterator output(out, window);
execute_window_loop(window, [&](const Coordinates & id)
@@ -356,50 +358,84 @@ void add_saturate_U8_U8_S16(const ITensor *in1, const ITensor *in2, ITensor *out
input1, input2, output);
}
-inline Status validate_arguments(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output, ConvertPolicy policy)
{
ARM_COMPUTE_UNUSED(policy);
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input1, input2, output);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input1, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input2, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QS8, DataType::U8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
- if(is_data_type_fixed_point(input1->data_type()) || is_data_type_fixed_point(input2->data_type()) || is_data_type_fixed_point(output->data_type()))
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input2, 1, DataType::U8, DataType::QS8, DataType::QS16, DataType::S16, DataType::F16, DataType::F32);
+
+ const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");
+
+ if(is_data_type_fixed_point(input1.data_type()) || is_data_type_fixed_point(input2.data_type()))
{
- // Check that all data types are the same and all fixed-point positions are the same
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input1, input2, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &input2);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(input1->data_type() == DataType::QS8 && input2->data_type() == DataType::QS8 && output->data_type() == DataType::QS8)
- && !(input1->data_type() == DataType::U8 && input2->data_type() == DataType::U8 && output->data_type() == DataType::U8)
- && !(input1->data_type() == DataType::U8 && input2->data_type() == DataType::U8 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::U8 && input2->data_type() == DataType::S16 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::S16 && input2->data_type() == DataType::U8 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::QS16 && input2->data_type() == DataType::QS16 && output->data_type() == DataType::QS16)
- && !(input1->data_type() == DataType::S16 && input2->data_type() == DataType::S16 && output->data_type() == DataType::S16)
- && !(input1->data_type() == DataType::F32 && input2->data_type() == DataType::F32 && output->data_type() == DataType::F32)
- && !(input1->data_type() == DataType::F16 && input2->data_type() == DataType::F16 && output->data_type() == DataType::F16),
- "You called addition with the wrong image formats");
+ // Validate in case of configured output
+ if(output.total_size() > 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ !(input1.data_type() == DataType::QS8 && input2.data_type() == DataType::QS8 && output.data_type() == DataType::QS8)
+ && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::U8)
+ && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::U8 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::U8 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::QS16 && input2.data_type() == DataType::QS16 && output.data_type() == DataType::QS16)
+ && !(input1.data_type() == DataType::S16 && input2.data_type() == DataType::S16 && output.data_type() == DataType::S16)
+ && !(input1.data_type() == DataType::F32 && input2.data_type() == DataType::F32 && output.data_type() == DataType::F32)
+ && !(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16 && output.data_type() == DataType::F16),
+ "You called addition with the wrong image formats");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
+ "Wrong shape for output");
+
+ if(is_data_type_fixed_point(input1.data_type()) || is_data_type_fixed_point(output.data_type()))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(&input1, &output);
+ }
+ }
return Status{};
}
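
validate_arguments now accepts inputs of different shapes and rejects only pairs that are not broadcast compatible: TensorShape::broadcast_shape returns an empty shape (total_size() == 0) whenever two corresponding dimensions differ and neither is 1. A minimal illustration of that contract, with example shapes assumed:

    const TensorShape a{ 27U, 13U, 2U };
    const TensorShape b{ 1U, 13U, 2U };  // x-extent of 1: broadcastable
    const TensorShape c{ 9U, 13U, 2U };  // 9 vs 27: not broadcastable

    const TensorShape ab = TensorShape::broadcast_shape(a, b); // { 27, 13, 2 }
    const TensorShape ac = TensorShape::broadcast_shape(a, c); // empty shape
    // validate_arguments() reports the second case as
    // "Inputs are not broadcast compatible".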
-inline std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output)
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output)
{
- constexpr unsigned int num_elems_processed_per_iteration = 16;
+ const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(input1, input2);
+ const TensorShape &out_shape = broadcast_pair.first;
+ const ValidRegion &valid_region = broadcast_pair.second;
- // Configure kernel window
- Window win = calculate_max_window(*input1, Steps(num_elems_processed_per_iteration));
- AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ // Auto initialize output if not initialized
+ {
+ set_shape_if_empty(output, out_shape);
+
+ if(input1.data_type() == DataType::S16 || input2.data_type() == DataType::S16)
+ {
+ set_format_if_unknown(output, Format::S16);
+ }
+ else if(input1.data_type() == DataType::F16 && input2.data_type() == DataType::F16)
+ {
+ set_format_if_unknown(output, Format::F16);
+ }
+ else if(input1.data_type() == DataType::F32 || input2.data_type() == DataType::F32)
+ {
+ set_format_if_unknown(output, Format::F32);
+ }
+ }
+
+ Window win = calculate_max_window(valid_region, Steps(num_elems_processed_per_iteration));
+ Window win_input1 = win.broadcast_if_dimension_le_one(input1);
+ Window win_input2 = win.broadcast_if_dimension_le_one(input2);
- bool window_changed = update_window_and_padding(win,
- AccessWindowHorizontal(input1, 0, num_elems_processed_per_iteration),
- AccessWindowHorizontal(input2, 0, num_elems_processed_per_iteration),
- output_access);
+ AccessWindowHorizontal input1_access(&input1, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal input2_access(&input2, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(&output, 0, num_elems_processed_per_iteration);
- ValidRegion valid_region = intersect_valid_regions(input1->valid_region(),
- input2->valid_region());
+ bool window_changed = update_window_and_padding(win_input1, input1_access)
+ || update_window_and_padding(win_input2, input2_access)
+ || update_window_and_padding(win, output_access);
output_access.set_valid_region(win, valid_region);
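
The window setup is easiest to follow with concrete shapes. An illustrative trace (the numbers are an assumed example, not taken from the patch), with input1 = { 1, 13, 2 }, input2 = { 27, 13, 2 } and num_elems_processed_per_iteration = 16:

    // out_shape  = { 27, 13, 2 }
    // win        : x in [0, 32) step 16 (27 rounded up to the step),
    //              y in [0, 13) step 1, z in [0, 2) step 1
    // win_input1 : x collapsed to (0, 0, 0) because input1's x-extent is 1
    // win_input2 : identical to win - nothing to broadcast
    // Each tensor's padding is then negotiated through the window it is
    // actually accessed with, rather than through one shared window.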
@@ -416,26 +452,11 @@ NEArithmeticAdditionKernel::NEArithmeticAdditionKernel()
void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1->info(), *input2->info(), *output->info(), policy));
- // Auto initialize output if not initialized
- {
- set_shape_if_empty(*output->info(), input1->info()->tensor_shape());
-
- if(input1->info()->data_type() == DataType::S16 || input2->info()->data_type() == DataType::S16)
- {
- set_format_if_unknown(*output->info(), Format::S16);
- }
- else if(input1->info()->data_type() == DataType::F16 || input2->info()->data_type() == DataType::F16)
- {
- set_format_if_unknown(*output->info(), Format::F16);
- }
- else if(input1->info()->data_type() == DataType::F32 || input2->info()->data_type() == DataType::F32)
- {
- set_format_if_unknown(*output->info(), Format::F32);
- }
- }
-
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input1->info(), input2->info(), output->info(), policy));
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(*input1->info(), *input2->info(), *output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
static std::map<std::string, AddFunction *> map_function =
{
@@ -476,16 +497,15 @@ void NEArithmeticAdditionKernel::configure(const ITensor *input1, const ITensor
_func = it->second;
}
- // Configure kernel window
- auto win_config = validate_and_configure_window(input1->info(), input2->info(), output->info());
- ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
INEKernel::configure(win_config.second);
}
Status NEArithmeticAdditionKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input1, input2, output, policy));
- ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input1->clone().get(), input2->clone().get(), output->clone().get()).first);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output, policy));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(*input1->clone(), *input2->clone(), *output->clone()).first);
return Status{};
}
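
Since all shape and type checks now live behind the static validate(), it can serve as a dry run before any tensor is allocated. A hedged sketch (shapes, types and the TensorInfo constructor usage are illustrative):

    const TensorInfo src1(TensorShape(27U, 13U, 2U), 1, DataType::F32);
    const TensorInfo src2(TensorShape(1U, 13U, 2U), 1, DataType::F32); // x-broadcast
    const TensorInfo dst(TensorShape(27U, 13U, 2U), 1, DataType::F32);

    const Status s = NEArithmeticAdditionKernel::validate(&src1, &src2, &dst,
                                                          ConvertPolicy::SATURATE);
    ARM_COMPUTE_ERROR_THROW_ON(s); // only fires on an incompatible configuration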
@@ -499,3 +519,10 @@ void NEArithmeticAdditionKernel::run(const Window &window, const ThreadInfo &inf
(*_func)(_input1, _input2, _output, window);
}
+
+BorderSize NEArithmeticAdditionKernel::border_size() const
+{
+ const unsigned int replicateSize = _output->info()->dimension(0) - std::min(_input1->info()->dimension(0), _input2->info()->dimension(0));
+ const unsigned int border = std::min<unsigned int>(num_elems_processed_per_iteration - 1U, replicateSize);
+ return BorderSize(0, border, 0, 0);
+}
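
border_size() is clearest with numbers. For an x-broadcast input the kernel's vectorised loop over-reads past the single real element, so the border handler must replicate it to the right; a worked example with assumed extents:

    // output x-extent = 27, broadcast input x-extent = 1,
    // num_elems_processed_per_iteration = 16:
    //   replicateSize = 27 - min(1, 27) = 26
    //   border        = min(16 - 1, 26) = 15
    //   => BorderSize(top = 0, right = 15, bottom = 0, left = 0)
    // 15 replicated elements after the real one cover a full 16-wide
    // vector access starting at x = 0.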
diff --git a/src/core/NEON/kernels/NEConvolutionKernel.cpp b/src/core/NEON/kernels/NEConvolutionKernel.cpp
index 7468f58ca5..0a10546b7b 100644
--- a/src/core/NEON/kernels/NEConvolutionKernel.cpp
+++ b/src/core/NEON/kernels/NEConvolutionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -1456,8 +1456,8 @@ void NEConvolutionRectangleKernel::configure(const ITensor *input, ITensor *outp
constexpr unsigned int num_elems_read_per_iteration = 16;
constexpr unsigned int num_elems_written_per_iteration = 8;
- Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, _border_size);
- AccessWindowHorizontal output_access = AccessWindowHorizontal(output->info(), 0, num_elems_written_per_iteration);
+ Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, _border_size);
+ AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration);
update_window_and_padding(win,
AccessWindowRectangle(input->info(), -_border_size.left, -_border_size.top, num_elems_read_per_iteration, height),
diff --git a/src/core/Validate.cpp b/src/core/Validate.cpp
index f495e488e2..f5f9f1f8f7 100644
--- a/src/core/Validate.cpp
+++ b/src/core/Validate.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,7 +82,7 @@ arm_compute::Status arm_compute::error_on_window_dimensions_gte(const char *func
{
for(unsigned int i = max_dim; i < arm_compute::Coordinates::num_max_dimensions; ++i)
{
- ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(win[i].start() != 0 || win[i].end() != win[i].step(),
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG((win[i].start() != 0) || (win[i].end() != win[i].step()),
function, file, line,
"Maximum number of dimensions expected %u but dimension %u is not empty", max_dim, i);
}
diff --git a/src/runtime/CL/functions/CLArithmeticAddition.cpp b/src/runtime/CL/functions/CLArithmeticAddition.cpp
index 5c2e582ba2..0b05058c4d 100644
--- a/src/runtime/CL/functions/CLArithmeticAddition.cpp
+++ b/src/runtime/CL/functions/CLArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,6 +23,7 @@
*/
#include "arm_compute/runtime/CL/functions/CLArithmeticAddition.h"
+#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
#include "support/ToolchainSupport.h"
@@ -30,11 +31,21 @@
using namespace arm_compute;
-void CLArithmeticAddition::configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy)
+void CLArithmeticAddition::configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy)
{
auto k = arm_compute::support::cpp14::make_unique<CLArithmeticAdditionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
+
+ if(output->info()->dimension(0) > 1)
+ {
+ ICLTensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2;
+
+ if(broadcasted_info->info()->dimension(0) == 1)
+ {
+ _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE);
+ }
+ }
}
Status CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
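
Tracing the branch in configure() above with assumed extents shows when the REPLICATE border is armed:

    // input1 x = 27, input2 x = 1, output x = 27:
    //   broadcasted_info = input2; dimension(0) == 1 holds,
    //   so input2 gets a REPLICATE border of _kernel->border_size().
    // input1 x = 27, input2 x = 27:
    //   broadcasted_info = input2; dimension(0) == 1 fails: no border.
    // output x = 1 (both inputs have x = 1):
    //   the outer dimension(0) > 1 check fails: no border needed.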
diff --git a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
index 678848b82e..911c9b3b27 100644
--- a/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
+++ b/src/runtime/CL/functions/CLLaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ CLLaplacianReconstruct::CLLaplacianReconstruct() // NOLINT
{
}
-void CLLaplacianReconstruct::configure(const CLPyramid *pyramid, const ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
+void CLLaplacianReconstruct::configure(const CLPyramid *pyramid, ICLTensor *input, ICLTensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
ARM_COMPUTE_ERROR_ON(nullptr == pyramid);
ARM_COMPUTE_ERROR_ON(input == output);
diff --git a/src/runtime/NEON/functions/NEArithmeticAddition.cpp b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
index b5dd4d0d06..7d8e3cff1c 100644
--- a/src/runtime/NEON/functions/NEArithmeticAddition.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,6 +23,7 @@
*/
#include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h"
#include "support/ToolchainSupport.h"
@@ -30,11 +31,21 @@
using namespace arm_compute;
-void NEArithmeticAddition::configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy)
+void NEArithmeticAddition::configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy)
{
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticAdditionKernel>();
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
+
+ if(output->info()->dimension(0) > 1)
+ {
+ ITensor *broadcasted_info = (input1->info()->dimension(0) == 1) ? input1 : input2;
+
+ if(broadcasted_info->info()->dimension(0) == 1)
+ {
+ _border_handler.configure(broadcasted_info, _kernel->border_size(), BorderMode::REPLICATE);
+ }
+ }
}
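
The NEON runtime mirrors the CL path. A minimal end-to-end sketch under the usual configure-then-allocate pattern (tensor shapes and the fill step are assumptions for illustration):

    Tensor lhs, rhs, sum;
    lhs.allocator()->init(TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32));
    rhs.allocator()->init(TensorInfo(TensorShape(1U, 13U, 2U), 1, DataType::F32)); // x-broadcast
    sum.allocator()->init(TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32));

    NEArithmeticAddition add;
    add.configure(&lhs, &rhs, &sum, ConvertPolicy::SATURATE);

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    sum.allocator()->allocate();
    // ... fill lhs and rhs ...
    add.run();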
Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
diff --git a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
index 0893701cd5..9ad9689b13 100644
--- a/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
+++ b/src/runtime/NEON/functions/NELaplacianReconstruct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,7 @@ NELaplacianReconstruct::NELaplacianReconstruct() // NOLINT
{
}
-void NELaplacianReconstruct::configure(const IPyramid *pyramid, const ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value)
+void NELaplacianReconstruct::configure(const IPyramid *pyramid, ITensor *input, ITensor *output, BorderMode border_mode, uint8_t constant_border_value)
{
ARM_COMPUTE_ERROR_ON(nullptr == pyramid);
ARM_COMPUTE_ERROR_ON(input == output);
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 7d4f2b866d..79e052c697 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -117,6 +117,34 @@ public:
}
};
+/** Data set containing pairs of small tensor shapes that are broadcast compatible. */
+class SmallShapesBroadcast final : public framework::dataset::ZipDataset<ShapeDataset, ShapeDataset>
+{
+public:
+ SmallShapesBroadcast()
+ : ZipDataset<ShapeDataset, ShapeDataset>(
+ ShapeDataset("Shape0",
+ {
+ TensorShape{ 9U, 9U },
+ TensorShape{ 27U, 13U, 2U },
+ TensorShape{ 128U, 1U, 5U, 3U },
+ TensorShape{ 9U, 9U, 3U, 4U },
+ TensorShape{ 27U, 13U, 2U, 4U },
+ TensorShape{ 1U, 1U, 1U, 5U }
+ }),
+ ShapeDataset("Shape1",
+ {
+ TensorShape{ 9U, 1U, 2U },
+ TensorShape{ 1U, 13U, 2U },
+ TensorShape{ 128U, 64U, 1U, 3U },
+ TensorShape{ 9U, 1U, 3U },
+ TensorShape{ 1U },
+ TensorShape{ 9U, 9U, 3U, 5U }
+ }))
+ {
+ }
+};
+
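
Each zipped pair above exercises a different broadcast pattern. The resulting output shapes, derived from the broadcast rule rather than stated in the patch:

    // { 9, 9 }         + { 9, 1, 2 }       -> { 9, 9, 2 }
    // { 27, 13, 2 }    + { 1, 13, 2 }      -> { 27, 13, 2 }
    // { 128, 1, 5, 3 } + { 128, 64, 1, 3 } -> { 128, 64, 5, 3 }
    // { 9, 9, 3, 4 }   + { 9, 1, 3 }       -> { 9, 9, 3, 4 }
    // { 27, 13, 2, 4 } + { 1 }             -> { 27, 13, 2, 4 }
    // { 1, 1, 1, 5 }   + { 9, 9, 3, 5 }    -> { 9, 9, 3, 5 }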
/** Data set containing medium tensor shapes. */
class MediumShapes final : public ShapeDataset
{
@@ -172,6 +200,30 @@ public:
}
};
+/** Data set containing pairs of large tensor shapes that are broadcast compatible. */
+class LargeShapesBroadcast final : public framework::dataset::ZipDataset<ShapeDataset, ShapeDataset>
+{
+public:
+ LargeShapesBroadcast()
+ : ZipDataset<ShapeDataset, ShapeDataset>(
+ ShapeDataset("Shape0",
+ {
+ TensorShape{ 1921U, 541U },
+ TensorShape{ 1U, 485U, 2U, 3U },
+ TensorShape{ 4159U, 1U },
+ TensorShape{ 799U }
+ }),
+ ShapeDataset("Shape1",
+ {
+ TensorShape{ 1921U, 1U, 2U },
+ TensorShape{ 641U, 1U, 2U, 3U },
+ TensorShape{ 1U, 127U, 25U },
+ TensorShape{ 799U, 595U, 1U, 4U }
+ }))
+ {
+ }
+};
+
/** Data set containing large 1D tensor shapes. */
class Large1DShapes final : public ShapeDataset
{
diff --git a/tests/framework/datasets/ContainerDataset.h b/tests/framework/datasets/ContainerDataset.h
index bdca97cbac..80616c46fc 100644
--- a/tests/framework/datasets/ContainerDataset.h
+++ b/tests/framework/datasets/ContainerDataset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,7 +72,8 @@ public:
{
}
- ContainerDataset(ContainerDataset &&) = default;
+ ContainerDataset(const ContainerDataset &) = default;
+ ContainerDataset(ContainerDataset &&) = default;
/** Type of the dataset. */
using type = std::tuple<container_value_type>;
diff --git a/tests/validation/CL/ArithmeticAddition.cpp b/tests/validation/CL/ArithmeticAddition.cpp
index 787b1b986f..4c19670d50 100644
--- a/tests/validation/CL/ArithmeticAddition.cpp
+++ b/tests/validation/CL/ArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -259,6 +259,25 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticAdditionFixture<float>, framework::
// Validate output
validate(CLAccessor(_target), _reference);
}
+
+template <typename T>
+using CLArithmeticAdditionBroadcastFixture = ArithmeticAdditionBroadcastValidationFixture<CLTensor, CLAccessor, CLArithmeticAddition, T>;
+
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
+ ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
+ ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
TEST_SUITE_END()
TEST_SUITE_END()
diff --git a/tests/validation/NEON/ArithmeticAddition.cpp b/tests/validation/NEON/ArithmeticAddition.cpp
index e20e8df665..32a4ff3a4d 100644
--- a/tests/validation/NEON/ArithmeticAddition.cpp
+++ b/tests/validation/NEON/ArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -263,6 +263,25 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NEArithmeticAdditionFixture<float>, framework::
// Validate output
validate(Accessor(_target), _reference);
}
+
+template <typename T>
+using NEArithmeticAdditionBroadcastFixture = ArithmeticAdditionBroadcastValidationFixture<Tensor, Accessor, NEArithmeticAddition, T>;
+
+FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapesBroadcast(),
+ ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, NEArithmeticAdditionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapesBroadcast(),
+ ArithmeticAdditionFP32Dataset),
+ framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
TEST_SUITE_END()
TEST_SUITE_END()
diff --git a/tests/validation/fixtures/ArithmeticAdditionFixture.h b/tests/validation/fixtures/ArithmeticAdditionFixture.h
index c3a51b97d1..f3888ae565 100644
--- a/tests/validation/fixtures/ArithmeticAdditionFixture.h
+++ b/tests/validation/fixtures/ArithmeticAdditionFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,15 +41,14 @@ namespace test
namespace validation
{
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
-class ArithmeticAdditionValidationFixedPointFixture : public framework::Fixture
+class ArithmeticAdditionBroadcastValidationFixedPointFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
{
- _fractional_bits = fractional_bits;
- _target = compute_target(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
- _reference = compute_reference(shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
+ _target = compute_target(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
+ _reference = compute_reference(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
}
protected:
@@ -59,12 +58,13 @@ protected:
library->fill_tensor_uniform(tensor, i);
}
- TensorType compute_target(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position)
+ TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+ int fixed_point_position)
{
// Create tensors
- TensorType ref_src1 = create_tensor<TensorType>(shape, data_type0, 1, fixed_point_position);
- TensorType ref_src2 = create_tensor<TensorType>(shape, data_type1, 1, fixed_point_position);
- TensorType dst = create_tensor<TensorType>(shape, output_data_type, 1, fixed_point_position);
+ TensorType ref_src1 = create_tensor<TensorType>(shape0, data_type0, 1, fixed_point_position);
+ TensorType ref_src2 = create_tensor<TensorType>(shape1, data_type1, 1, fixed_point_position);
+ TensorType dst = create_tensor<TensorType>(TensorShape::broadcast_shape(shape0, shape1), output_data_type, 1, fixed_point_position);
// Create and configure function
FunctionType add;
@@ -93,11 +93,12 @@ protected:
return dst;
}
- SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fixed_point_position)
+ SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy,
+ int fixed_point_position)
{
// Create reference
- SimpleTensor<T> ref_src1{ shape, data_type0, 1, fixed_point_position };
- SimpleTensor<T> ref_src2{ shape, data_type1, 1, fixed_point_position };
+ SimpleTensor<T> ref_src1{ shape0, data_type0, 1, fixed_point_position };
+ SimpleTensor<T> ref_src2{ shape1, data_type1, 1, fixed_point_position };
// Fill reference
fill(ref_src1, 0);
@@ -108,14 +109,36 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- int _fractional_bits{};
};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticAdditionBroadcastValidationFixture : public ArithmeticAdditionBroadcastValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape0, const TensorShape &shape1, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ {
+ ArithmeticAdditionBroadcastValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, data_type0, data_type1, output_data_type, convert_policy, 0);
+ }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ArithmeticAdditionValidationFixedPointFixture : public ArithmeticAdditionBroadcastValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+ template <typename...>
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy, int fractional_bits)
+ {
+ ArithmeticAdditionBroadcastValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, shape, data_type0, data_type1, output_data_type, convert_policy, fractional_bits);
+ }
+};
+
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ArithmeticAdditionValidationFixture : public ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
{
public:
template <typename...>
- void setup(TensorShape shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
+ void setup(const TensorShape &shape, DataType data_type0, DataType data_type1, DataType output_data_type, ConvertPolicy convert_policy)
{
ArithmeticAdditionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type0, data_type1, output_data_type, convert_policy, 0);
}
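
The fixture refactor keeps the old single-shape entry points intact by delegating into the new broadcast base with the same shape twice:

    // Delegation chain as introduced above (arguments abbreviated):
    // ArithmeticAdditionValidationFixture::setup(shape, ...)
    //   -> ArithmeticAdditionValidationFixedPointFixture::setup(shape, ..., 0)
    //     -> ArithmeticAdditionBroadcastValidationFixedPointFixture::setup(shape, shape, ...)
    // i.e. the non-broadcast path is the broadcast path with shape0 == shape1.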
diff --git a/tests/validation/reference/ArithmeticAddition.cpp b/tests/validation/reference/ArithmeticAddition.cpp
index 82dd1437cd..17020a6277 100644
--- a/tests/validation/reference/ArithmeticAddition.cpp
+++ b/tests/validation/reference/ArithmeticAddition.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,27 +35,72 @@ namespace validation
{
namespace reference
{
+namespace
+{
template <typename T>
-SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, DataType dst_data_type, ConvertPolicy convert_policy)
+T add(T src1, T src2, ConvertPolicy convert_policy)
{
- SimpleTensor<T> result(src1.shape(), dst_data_type);
-
using intermediate_type = typename common_promoted_signed_type<T>::intermediate_type;
- for(int i = 0; i < src1.num_elements(); ++i)
+ intermediate_type val = static_cast<intermediate_type>(src1) + static_cast<intermediate_type>(src2);
+
+ T result = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T>(val) : static_cast<T>(val);
+
+ return result;
+}
+
+template <size_t dim>
+struct BroadcastUnroll
+{
+ template <typename T>
+ static void unroll(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, SimpleTensor<T> &dst,
+ ConvertPolicy convert_policy, Coordinates &id_src1, Coordinates &id_src2, Coordinates &id_dst)
{
- intermediate_type val = static_cast<intermediate_type>(src1[i]) + static_cast<intermediate_type>(src2[i]);
- result[i] = (convert_policy == ConvertPolicy::SATURATE) ? saturate_cast<T>(val) : static_cast<T>(val);
+ const bool src1_is_broadcast = (src1.shape()[dim - 1] != dst.shape()[dim - 1]);
+ const bool src2_is_broadcast = (src2.shape()[dim - 1] != dst.shape()[dim - 1]);
+
+ id_src1.set(dim - 1, 0);
+ id_src2.set(dim - 1, 0);
+ id_dst.set(dim - 1, 0);
+
+ for(size_t i = 0; i < dst.shape()[dim - 1]; ++i, ++id_dst[dim - 1])
+ {
+ BroadcastUnroll < dim - 1 >::unroll(src1, src2, dst, convert_policy, id_src1, id_src2, id_dst);
+
+ id_src1[dim - 1] += !src1_is_broadcast;
+ id_src2[dim - 1] += !src2_is_broadcast;
+ }
}
+};
- return result;
+template <>
+struct BroadcastUnroll<0>
+{
+ template <typename T>
+ static void unroll(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, SimpleTensor<T> &dst,
+ ConvertPolicy convert_policy, Coordinates &id_src1, Coordinates &id_src2, Coordinates &id_dst)
+ {
+ dst[coord2index(dst.shape(), id_dst)] = add(src1[coord2index(src1.shape(), id_src1)], src2[coord2index(src2.shape(), id_src2)], convert_policy);
+ }
+};
+} // namespace
+
+template <typename T>
+SimpleTensor<T> arithmetic_addition(const SimpleTensor<T> &src1, const SimpleTensor<T> &src2, DataType dst_data_type, ConvertPolicy convert_policy)
+{
+ SimpleTensor<T> dst(TensorShape::broadcast_shape(src1.shape(), src2.shape()), dst_data_type);
+
+ Coordinates id_src1, id_src2, id_dst;
+
+ BroadcastUnroll<Coordinates::num_max_dimensions>::unroll(src1, src2, dst, convert_policy, id_src1, id_src2, id_dst);
+
+ return dst;
}
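
The BroadcastUnroll recursion walks the destination shape from the outermost dimension down to dimension 0, advancing each source index only in dimensions where that source is not broadcast. The following is a self-contained sketch of the same idea, reduced to two dimensions and plain std::vector so it compiles standalone; every name in it is local to the sketch, not part of the library:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    template <size_t dim>
    struct Unroll
    {
        static void run(const std::vector<size_t> &shape1, const std::vector<size_t> &shape2,
                        const std::vector<size_t> &shape_dst,
                        std::vector<size_t> &id1, std::vector<size_t> &id2, std::vector<size_t> &idd,
                        const std::vector<int> &src1, const std::vector<int> &src2, std::vector<int> &dst)
        {
            const bool b1 = shape1[dim - 1] != shape_dst[dim - 1]; // src1 broadcast here
            const bool b2 = shape2[dim - 1] != shape_dst[dim - 1]; // src2 broadcast here
            id1[dim - 1] = id2[dim - 1] = idd[dim - 1] = 0;
            for(size_t i = 0; i < shape_dst[dim - 1]; ++i, ++idd[dim - 1])
            {
                Unroll<dim - 1>::run(shape1, shape2, shape_dst, id1, id2, idd, src1, src2, dst);
                id1[dim - 1] += !b1; // stay at 0 while broadcasting
                id2[dim - 1] += !b2;
            }
        }
    };

    template <>
    struct Unroll<0>
    {
        static void run(const std::vector<size_t> &shape1, const std::vector<size_t> &shape2,
                        const std::vector<size_t> &shape_dst,
                        std::vector<size_t> &id1, std::vector<size_t> &id2, std::vector<size_t> &idd,
                        const std::vector<int> &src1, const std::vector<int> &src2, std::vector<int> &dst)
        {
            // Linearise a 2-D coordinate with x fastest, like coord2index().
            auto index = [](const std::vector<size_t> &shape, const std::vector<size_t> &id) {
                return id[1] * shape[0] + id[0];
            };
            dst[index(shape_dst, idd)] = src1[index(shape1, id1)] + src2[index(shape2, id2)];
        }
    };

    int main()
    {
        const std::vector<size_t> s1{ 3, 1 }, s2{ 1, 2 }, sd{ 3, 2 }; // {x, y}
        const std::vector<int>    a{ 1, 2, 3 };  // row of 3, broadcast along y
        const std::vector<int>    b{ 10, 20 };   // column of 2, broadcast along x
        std::vector<int>          d(6);
        std::vector<size_t>       i1(2), i2(2), id(2);
        Unroll<2>::run(s1, s2, sd, i1, i2, id, a, b, d);
        for(int v : d) std::cout << v << ' ';    // 11 12 13 21 22 23
        return 0;
    }

Compiled and run, this prints 11 12 13 21 22 23: the 3-element row { 1, 2, 3 } is broadcast along y and the 2-element column { 10, 20 } along x, matching what the library version computes for those shapes.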
template SimpleTensor<uint8_t> arithmetic_addition(const SimpleTensor<uint8_t> &src1, const SimpleTensor<uint8_t> &src2, DataType dst_data_type, ConvertPolicy convert_policy);
template SimpleTensor<int16_t> arithmetic_addition(const SimpleTensor<int16_t> &src1, const SimpleTensor<int16_t> &src2, DataType dst_data_type, ConvertPolicy convert_policy);
template SimpleTensor<int8_t> arithmetic_addition(const SimpleTensor<int8_t> &src1, const SimpleTensor<int8_t> &src2, DataType dst_data_type, ConvertPolicy convert_policy);
-template SimpleTensor<half> arithmetic_addition(const SimpleTensor<half> &src1, const SimpleTensor<half> &src2, DataType dst_data_type,
- ConvertPolicy convert_policy);
+template SimpleTensor<half> arithmetic_addition(const SimpleTensor<half> &src1, const SimpleTensor<half> &src2, DataType dst_data_type, ConvertPolicy convert_policy);
template SimpleTensor<float> arithmetic_addition(const SimpleTensor<float> &src1, const SimpleTensor<float> &src2, DataType dst_data_type, ConvertPolicy convert_policy);
} // namespace reference
} // namespace validation