Diffstat (limited to 'arm_compute/core/Helpers.h')
-rw-r--r--  arm_compute/core/Helpers.h  |  74
1 files changed, 60 insertions(+), 14 deletions(-)
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index b6635aba6d..960201510a 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,6 +55,16 @@ public:
*/
Iterator(const ITensor *tensor, const Window &window);
+ /** Create a container iterator for the tensor with the specified number of dimensions, stride, buffer pointer and window.
+ *
+ * @param[in] num_dims The number of dimensions.
+ * @param[in] strides The strides in bytes.
+ * @param[in] buffer The data buffer.
+ * @param[in] offset The offset in bytes from the beginning of the buffer to the first element of the tensor.
+ * @param[in] window The window which will be used to iterate over the tensor.
+ */
+ Iterator(size_t num_dims, const Strides &strides, uint8_t *buffer, size_t offset, const Window &window);
+
/** Increment the iterator along the specified dimension of the step value associated to the dimension.
*
* @warning It is the caller's responsibility to call increment(dimension+1) when reaching the end of a dimension, the iterator will not check for overflow.
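
The new constructor above builds an Iterator from raw buffer metadata instead of an ITensor. A minimal sketch of a call site, assuming the caller already owns a packed float buffer and a matching Window (the shape, strides and function name below are illustrative, not part of this patch):

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Strides.h"
#include "arm_compute/core/Window.h"

#include <cstdint>
#include <vector>

// Sketch only: fill a row-major 4x3 float buffer through the raw-buffer Iterator.
void fill_raw_buffer()
{
    std::vector<float> data(4 * 3, 0.f);

    // Strides in bytes: 4 bytes between x neighbours, 16 bytes between rows.
    const arm_compute::Strides strides(sizeof(float), 4 * sizeof(float));

    arm_compute::Window win;
    win.set(0, arm_compute::Window::Dimension(0, 4)); // x in [0, 4)
    win.set(1, arm_compute::Window::Dimension(0, 3)); // y in [0, 3)

    // 2 dimensions, offset 0 bytes from the start of the buffer.
    arm_compute::Iterator it(2, strides, reinterpret_cast<uint8_t *>(data.data()), 0, win);

    arm_compute::execute_window_loop(
        win, [&](const arm_compute::Coordinates &) { *reinterpret_cast<float *>(it.ptr()) = 1.f; }, it);
}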
@@ -86,13 +96,22 @@ public:
void reset(size_t dimension);
private:
+ /** Initialize a container iterator for the tensor with the specified number of dimensions, stride, buffer pointer and window.
+ *
+ * @param[in] num_dims The number of dimensions.
+ * @param[in] strides The strides in bytes.
+ * @param[in] buffer The data buffer.
+ * @param[in] offset The offset in bytes from the beginning of the buffer to the first element of the tensor.
+ * @param[in] window The window which will be used to iterate over the tensor.
+ */
+ void initialize(size_t num_dims, const Strides &strides, uint8_t *buffer, size_t offset, const Window &window);
+
uint8_t *_ptr;
class Dimension
{
public:
- constexpr Dimension()
- : _dim_start(0), _stride(0)
+ constexpr Dimension() : _dim_start(0), _stride(0)
{
}
@@ -112,7 +131,7 @@ private:
* @param[in,out] iterators Tensor iterators which will be updated by this function before calling lambda_function.
*/
template <typename L, typename... Ts>
-inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators);
+inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&...iterators);
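
For reference, a typical call site pairs execute_window_loop with one Iterator per tensor; the lambda reads and writes through the iterators, which are advanced by the loop itself. A short sketch, assuming two float tensors that share the same execution window (the function name is illustrative):

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Window.h"

// Sketch: element-wise copy of float data from src to dst over the window win.
void copy_over_window(const arm_compute::ITensor *src, arm_compute::ITensor *dst, const arm_compute::Window &win)
{
    arm_compute::Iterator in(src, win);
    arm_compute::Iterator out(dst, win);

    arm_compute::execute_window_loop(
        win,
        [&](const arm_compute::Coordinates &)
        { *reinterpret_cast<float *>(out.ptr()) = *reinterpret_cast<float *>(in.ptr()); },
        in, out);
}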
/** Permutes given Dimensions according to a permutation vector
*
@@ -125,7 +144,7 @@ template <typename T>
inline void permute(Dimensions<T> &dimensions, const PermutationVector &perm)
{
auto dimensions_copy = utility::make_array<Dimensions<T>::num_max_dimensions>(dimensions.begin(), dimensions.end());
- for(unsigned int i = 0; i < perm.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < perm.num_dimensions(); ++i)
{
T dimension_val = (perm[i] < dimensions.num_dimensions()) ? dimensions_copy[perm[i]] : 0;
dimensions.set(i, dimension_val);
@@ -142,7 +161,7 @@ inline void permute(Dimensions<T> &dimensions, const PermutationVector &perm)
inline void permute(TensorShape &shape, const PermutationVector &perm)
{
TensorShape shape_copy = shape;
- for(unsigned int i = 0; i < perm.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < perm.num_dimensions(); ++i)
{
size_t dimension_val = (perm[i] < shape.num_dimensions()) ? shape_copy[perm[i]] : 1;
shape.set(i, dimension_val, false, false); // Avoid changes in _num_dimension
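
Both permute() overloads reorder dimensions in place with dst[i] = src[perm[i]], as the loops above show. A small sketch of converting an NCHW-ordered shape to NHWC order, assuming the usual library convention that index 0 is the innermost dimension (W for NCHW, C for NHWC); the concrete sizes are illustrative:

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

void permute_shape_example()
{
    // NCHW tensor with N=1, C=3, H=32, W=64, stored innermost-first as {W, H, C, N}.
    arm_compute::TensorShape shape(64U, 32U, 3U, 1U);

    // New index 0 takes old index 2 (C), index 1 takes old index 0 (W),
    // index 2 takes old index 1 (H); index 3 (N) is left untouched.
    const arm_compute::PermutationVector perm(2U, 0U, 1U);
    arm_compute::permute(shape, perm);

    // shape is now {3, 64, 32, 1}, i.e. {C, W, H, N}, the NHWC storage order.
}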
@@ -159,8 +178,11 @@ inline void permute(TensorShape &shape, const PermutationVector &perm)
*
* @return The corresponding valid region
*/
-ValidRegion calculate_valid_region_scale(const ITensorInfo &src_info, const TensorShape &dst_shape,
- InterpolationPolicy interpolate_policy, SamplingPolicy sampling_policy, bool border_undefined);
+ValidRegion calculate_valid_region_scale(const ITensorInfo &src_info,
+ const TensorShape &dst_shape,
+ InterpolationPolicy interpolate_policy,
+ SamplingPolicy sampling_policy,
+ bool border_undefined);
/** Convert a linear index into n-dimensional coordinates.
*
@@ -180,6 +202,22 @@ inline Coordinates index2coords(const TensorShape &shape, int index);
*/
inline int coords2index(const TensorShape &shape, const Coordinates &coord);
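
These two helpers map between a flat element index and per-dimension coordinates using mixed-radix arithmetic, with dimension 0 varying fastest. A small illustrative sketch (the shape is arbitrary):

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorShape.h"

void index_mapping_example()
{
    // Shape {W=4, H=3}: 12 elements, linear index = x + 4 * y.
    const arm_compute::TensorShape shape(4U, 3U);

    const arm_compute::Coordinates coords = arm_compute::index2coords(shape, 7);      // x = 3, y = 1
    const int                      index  = arm_compute::coords2index(shape, coords); // back to 7
    (void)index;
}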
+/** Returns a static map used to find an index or dimension based on a data layout
+ *
+ * *** Layouts ***
+ *
+ * *** 4D ***
+ * [N C H W]
+ * [3 2 1 0]
+ * [N H W C]
+ *
+ * *** 5D ***
+ * [N C D H W]
+ * [4 3 2 1 0]
+ * [N D H W C]
+ */
+const std::map<DataLayout, std::vector<DataLayoutDimension>> &get_layout_map();
+
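
Reading the table above: for each layout, position i of the mapped vector names the DataLayoutDimension stored at tensor index i (so NHWC places the channel dimension at index 0). A hedged sketch of a lookup against the new function:

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"

void layout_map_example()
{
    const auto &layout_map = arm_compute::get_layout_map();

    // Per the comment above, NHWC maps index 0 to CHANNEL.
    const auto &nhwc           = layout_map.at(arm_compute::DataLayout::NHWC);
    const bool  channels_first = (nhwc[0] == arm_compute::DataLayoutDimension::CHANNEL); // true
    (void)channels_first;
}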
/** Get the index of the given dimension.
*
* @param[in] data_layout The data layout.
@@ -187,7 +225,8 @@ inline int coords2index(const TensorShape &shape, const Coordinates &coord);
*
* @return The int conversion of the requested data layout index.
*/
-inline size_t get_data_layout_dimension_index(const DataLayout data_layout, const DataLayoutDimension data_layout_dimension);
+inline size_t get_data_layout_dimension_index(const DataLayout &data_layout,
+ const DataLayoutDimension &data_layout_dimension);
/** Get the DataLayoutDimension of a given index and layout.
*
@@ -196,7 +235,7 @@ inline size_t get_data_layout_dimension_index(const DataLayout data_layout, cons
*
* @return The dimension which this index is requested for.
*/
-inline DataLayoutDimension get_index_data_layout_dimension(const DataLayout data_layout, const size_t index);
+inline DataLayoutDimension get_index_data_layout_dimension(const DataLayout &data_layout, const size_t index);
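
Most call sites go through these two wrappers rather than the map itself. A short sketch using the NHWC mapping documented above:

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"

void dimension_index_example()
{
    using namespace arm_compute;

    // For NHWC the width lives at tensor index 1 (see the layout table above).
    const size_t width_idx = get_data_layout_dimension_index(DataLayout::NHWC, DataLayoutDimension::WIDTH); // 1

    // Index 0 of an NHWC tensor holds the channel dimension.
    const DataLayoutDimension dim0 = get_index_data_layout_dimension(DataLayout::NHWC, 0); // CHANNEL
    (void)width_idx;
    (void)dim0;
}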
/** Calculate the number of output tiles required by Winograd Convolution layer. This utility function can be used by the Winograd input transform
* to know the number of tiles on the x and y direction
@@ -208,10 +247,17 @@ inline DataLayoutDimension get_index_data_layout_dimension(const DataLayout data
*
* @return the number of output tiles along the x and y directions of size "output_tile_size"
*/
-inline Size2D compute_winograd_convolution_tiles(const Size2D &in_dims, const Size2D &kernel_size, const Size2D &output_tile_size, const PadStrideInfo &conv_info)
+inline Size2D compute_winograd_convolution_tiles(const Size2D &in_dims,
+ const Size2D &kernel_size,
+ const Size2D &output_tile_size,
+ const PadStrideInfo &conv_info)
{
- int num_tiles_x = std::ceil((in_dims.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / static_cast<float>(output_tile_size.width));
- int num_tiles_y = std::ceil((in_dims.height - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / static_cast<float>(output_tile_size.height));
+ int num_tiles_x =
+ std::ceil((in_dims.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) /
+ static_cast<float>(output_tile_size.width));
+ int num_tiles_y =
+ std::ceil((in_dims.height - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) /
+ static_cast<float>(output_tile_size.height));
// Clamp in case we provide paddings but we have 1D convolution
num_tiles_x = std::min(num_tiles_x, static_cast<int>(in_dims.width));
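
A worked instance of the formula above, with illustrative numbers: for a 14x14 input, a 3x3 kernel, 4x4 output tiles and 1-pixel padding on every side, each direction gives ceil((14 - 2 + 1 + 1) / 4.0) = ceil(3.5) = 4 tiles.

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"

void winograd_tiles_example()
{
    using namespace arm_compute;

    const Size2D        in_dims(14U, 14U);
    const Size2D        kernel_size(3U, 3U);
    const Size2D        output_tile(4U, 4U);
    const PadStrideInfo conv_info(1, 1, 1, 1); // stride 1x1, 1-pixel padding on each side

    const Size2D num_tiles = compute_winograd_convolution_tiles(in_dims, kernel_size, output_tile, conv_info);
    // num_tiles is {4, 4}.
    (void)num_tiles;
}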
@@ -240,7 +286,7 @@ inline T wrap_around(T x, T m)
*/
inline Coordinates &convert_negative_axis(Coordinates &coords, int max_value)
{
- for(unsigned int i = 0; i < coords.num_dimensions(); ++i)
+ for (unsigned int i = 0; i < coords.num_dimensions(); ++i)
{
coords[i] = wrap_around(coords[i], max_value);
}
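
convert_negative_axis() maps Python-style negative axes onto valid dimension indices by wrapping them modulo max_value through wrap_around(). A small sketch with illustrative values:

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Helpers.h"

void negative_axis_example()
{
    // Reduction axes {-1, 0} for a 4-dimensional tensor: -1 wraps to 3, 0 stays 0.
    arm_compute::Coordinates axes(-1, 0);
    arm_compute::convert_negative_axis(axes, 4);
    // axes is now {3, 0}.
}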