author    Sang-Hoon Park <sang-hoon.park@arm.com>    2020-10-19 16:00:11 +0100
committer Georgios Pinitas <georgios.pinitas@arm.com>    2020-10-20 10:27:40 +0000
commit    68dd25fbe6e4d3c3513fa5993863419769aa08fc (patch)
tree      b918be923f9e4550c306d7f44d168ab938a71fc8 /src/core/helpers
parent    f0a4e609d98f111b6a7d4a2b578d1b7cba64b805 (diff)
download  ComputeLibrary-68dd25fbe6e4d3c3513fa5993863419769aa08fc.tar.gz
COMPMID-3637: Move utility headers from arm_compute to src
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: If9d6fa8c900b68c4b6fd373f2fc1f9abb83ea917
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4145
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src/core/helpers')
-rw-r--r--  src/core/helpers/AutoConfiguration.h      176
-rw-r--r--  src/core/helpers/NormalizationHelpers.h    47
-rw-r--r--  src/core/helpers/ScaleHelpers.h            331
-rw-r--r--  src/core/helpers/SoftmaxHelpers.cpp         45
-rw-r--r--  src/core/helpers/SoftmaxHelpers.h           50
-rw-r--r--  src/core/helpers/Utils.h                    97
-rw-r--r--  src/core/helpers/WindowHelpers.cpp         183
-rw-r--r--  src/core/helpers/WindowHelpers.h           172
8 files changed, 1101 insertions(+), 0 deletions(-)
diff --git a/src/core/helpers/AutoConfiguration.h b/src/core/helpers/AutoConfiguration.h
new file mode 100644
index 0000000000..6880a6cb66
--- /dev/null
+++ b/src/core/helpers/AutoConfiguration.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_HELPERS_AUTOCONFIGURATION_H
+#define SRC_CORE_HELPERS_AUTOCONFIGURATION_H
+
+#include "arm_compute/core/ITensorInfo.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+/** Auto initialize the tensor info (shape, number of channels and data type) if the current assignment is empty.
+ *
+ * @param[in,out] info Tensor info used to check and assign.
+ * @param[in] shape New shape.
+ * @param[in] num_channels New number of channels.
+ * @param[in] data_type New data type
+ * @param[in] quantization_info (Optional) New quantization info
+ *
+ * @return True if the tensor info has been initialized
+ */
+inline bool auto_init_if_empty(ITensorInfo &info,
+ const TensorShape &shape,
+ int num_channels, DataType data_type,
+ QuantizationInfo quantization_info = QuantizationInfo())
+{
+ if(info.tensor_shape().total_size() == 0)
+ {
+ info.set_data_type(data_type);
+ info.set_num_channels(num_channels);
+ info.set_tensor_shape(shape);
+ info.set_quantization_info(quantization_info);
+ return true;
+ }
+
+ return false;
+}
+
+/** Auto initialize the tensor info using another tensor info.
+*
+* @param info_sink Tensor info used to check and assign
+* @param info_source Tensor info used to assign
+*
+* @return True if the tensor info has been initialized
+*/
+inline bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source)
+{
+ if(info_sink.tensor_shape().total_size() == 0)
+ {
+ info_sink.set_data_type(info_source.data_type());
+ info_sink.set_num_channels(info_source.num_channels());
+ info_sink.set_tensor_shape(info_source.tensor_shape());
+ info_sink.set_quantization_info(info_source.quantization_info());
+ info_sink.set_data_layout(info_source.data_layout());
+ return true;
+ }
+
+ return false;
+}
+
+/** Set the shape to the specified value if the current assignment is empty.
+ *
+ * @param[in,out] info Tensor info used to check and assign.
+ * @param[in] shape New shape.
+ *
+ * @return True if the shape has been changed.
+ */
+inline bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape)
+{
+ if(info.tensor_shape().total_size() == 0)
+ {
+ info.set_tensor_shape(shape);
+ return true;
+ }
+
+ return false;
+}
+
+/** Set the format, data type and number of channels to the specified value if
+ * the current data type is unknown.
+ *
+ * @param[in,out] info Tensor info used to check and assign.
+ * @param[in] format New format.
+ *
+ * @return True if the format has been changed.
+ */
+inline bool set_format_if_unknown(ITensorInfo &info, Format format)
+{
+ if(info.data_type() == DataType::UNKNOWN)
+ {
+ info.set_format(format);
+ return true;
+ }
+
+ return false;
+}
+
+/** Set the data type and number of channels to the specified value if
+ * the current data type is unknown.
+ *
+ * @param[in,out] info Tensor info used to check and assign.
+ * @param[in] data_type New data type.
+ *
+ * @return True if the data type has been changed.
+ */
+inline bool set_data_type_if_unknown(ITensorInfo &info, DataType data_type)
+{
+ if(info.data_type() == DataType::UNKNOWN)
+ {
+ info.set_data_type(data_type);
+ return true;
+ }
+
+ return false;
+}
+
+/** Set the data layout to the specified value if
+ * the current data layout is unknown.
+ *
+ * @param[in,out] info Tensor info used to check and assign.
+ * @param[in] data_layout New data layout.
+ *
+ * @return True if the data layout has been changed.
+ */
+inline bool set_data_layout_if_unknown(ITensorInfo &info, DataLayout data_layout)
+{
+ if(info.data_layout() == DataLayout::UNKNOWN)
+ {
+ info.set_data_layout(data_layout);
+ return true;
+ }
+
+ return false;
+}
+
+/** Set the quantization info to the specified value if
+ * the current quantization info is empty and the data type is an asymmetric quantized type
+ *
+ * @param[in,out] info Tensor info used to check and assign.
+ * @param[in] quantization_info Quantization info
+ *
+ * @return True if the quantization info has been changed.
+ */
+inline bool set_quantization_info_if_empty(ITensorInfo &info, QuantizationInfo quantization_info)
+{
+ if(info.quantization_info().empty() && (is_data_type_quantized_asymmetric(info.data_type())))
+ {
+ info.set_quantization_info(quantization_info);
+ return true;
+ }
+
+ return false;
+}
+} // namespace arm_compute
+
+#endif /* SRC_CORE_HELPERS_AUTOCONFIGURATION_H */
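These helpers are typically called from a kernel's configure()/validate() path to propagate metadata from an input tensor to a not-yet-configured output. A minimal usage sketch follows; the function name configure_output is hypothetical and not part of this patch.

    #include "src/core/helpers/AutoConfiguration.h"

    // Hypothetical helper: give 'output' the same shape/type/layout metadata as
    // 'input' unless the caller has already configured it.
    void configure_output(const arm_compute::ITensorInfo &input, arm_compute::ITensorInfo &output)
    {
        // Returns true only when 'output' started with an empty shape and was filled in here.
        const bool was_initialized = arm_compute::auto_init_if_empty(output, input);
        (void)was_initialized;
    }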
diff --git a/src/core/helpers/NormalizationHelpers.h b/src/core/helpers/NormalizationHelpers.h
new file mode 100644
index 0000000000..d94d5e3602
--- /dev/null
+++ b/src/core/helpers/NormalizationHelpers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_HELPERS_NORMALIZATIONHELPERS_H
+#define SRC_CORE_HELPERS_NORMALIZATIONHELPERS_H
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+/** Calculate the normalization dimension index for a given normalization type
+ *
+ * @param[in] layout Data layout of the input and output tensor
+ * @param[in] info Normalization info
+ *
+ * @return Normalization dimension index
+ */
+inline unsigned int get_normalization_dimension_index(DataLayout layout, const NormalizationLayerInfo &info)
+{
+ const unsigned int width_idx = get_data_layout_dimension_index(layout, DataLayoutDimension::WIDTH);
+ const unsigned int channel_idx = get_data_layout_dimension_index(layout, DataLayoutDimension::CHANNEL);
+
+ return info.is_in_map() ? width_idx : channel_idx;
+}
+} // namespace arm_compute
+#endif /* SRC_CORE_HELPERS_NORMALIZATIONHELPERS_H */
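A quick sanity check on the mapping above, as a sketch assuming the usual arm_compute layout convention where NHWC stores channels in dimension 0:

    #include "src/core/helpers/NormalizationHelpers.h"

    using namespace arm_compute;

    // CROSS_MAP normalization works across channels, so the helper is expected to
    // return the channel dimension index (0 for NHWC); IN_MAP_* types return the
    // width dimension index instead.
    const NormalizationLayerInfo cross_map_info(NormType::CROSS_MAP);
    const unsigned int norm_dim = get_normalization_dimension_index(DataLayout::NHWC, cross_map_info);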
diff --git a/src/core/helpers/ScaleHelpers.h b/src/core/helpers/ScaleHelpers.h
new file mode 100644
index 0000000000..827bbef4cd
--- /dev/null
+++ b/src/core/helpers/ScaleHelpers.h
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_HELPERS_SCALEHELPERS_H
+#define SRC_CORE_HELPERS_SCALEHELPERS_H
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/QuantizationInfo.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <cstdint>
+#include <numeric>
+
+namespace arm_compute
+{
+namespace scale_helpers
+{
+/** Computes bilinear interpolation using the pointer to the top-left pixel and the pixel's distance between
+ * the real coordinates and the smallest following integer coordinates. Input must be in single channel format.
+ *
+ * @param[in] pixel_ptr Pointer to the top-left pixel value of a single channel input.
+ * @param[in] stride Stride to access the bottom-left and bottom-right pixel values
+ * @param[in] dx Pixel's distance between the X real coordinate and the smallest X following integer
+ * @param[in] dy Pixel's distance between the Y real coordinate and the smallest Y following integer
+ *
+ * @note dx and dy must be in the range [0, 1.0]
+ *
+ * @return The bilinear interpolated pixel value
+ */
+template <typename T>
+inline T delta_bilinear_c1(const T *pixel_ptr, size_t stride, float dx, float dy)
+{
+ ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);
+
+ const float dx1 = 1.0f - dx;
+ const float dy1 = 1.0f - dy;
+
+ const T a00 = *pixel_ptr;
+ const T a01 = *(pixel_ptr + 1);
+ const T a10 = *(pixel_ptr + stride);
+ const T a11 = *(pixel_ptr + stride + 1);
+
+ const float w1 = dx1 * dy1;
+ const float w2 = dx * dy1;
+ const float w3 = dx1 * dy;
+ const float w4 = dx * dy;
+
+ return static_cast<T>(a00 * w1 + a01 * w2 + a10 * w3 + a11 * w4);
+}
+
+/** Computes bilinear interpolation for quantized input and output, using the pointer to the top-left pixel and the pixel's distance between
+ * the real coordinates and the smallest following integer coordinates. Input must be QASYMM8 and in single channel format.
+ *
+ * @param[in] pixel_ptr Pointer to the top-left pixel value of a single channel input.
+ * @param[in] stride Stride to access the bottom-left and bottom-right pixel values
+ * @param[in] dx Pixel's distance between the X real coordinate and the smallest X following integer
+ * @param[in] dy Pixel's distance between the Y real coordinate and the smallest Y following integer
+ * @param[in] iq_info Input QuantizationInfo
+ * @param[in] oq_info Output QuantizationInfo
+ *
+ * @note dx and dy must be in the range [0, 1.0]
+ *
+ * @return The bilinear interpolated pixel value
+ */
+inline uint8_t delta_bilinear_c1_quantized(const uint8_t *pixel_ptr, size_t stride, float dx, float dy,
+ UniformQuantizationInfo iq_info, UniformQuantizationInfo oq_info)
+{
+ ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);
+
+ const float dx1 = 1.0f - dx;
+ const float dy1 = 1.0f - dy;
+
+ const float a00 = dequantize_qasymm8(*pixel_ptr, iq_info);
+ const float a01 = dequantize_qasymm8(*(pixel_ptr + 1), iq_info);
+ const float a10 = dequantize_qasymm8(*(pixel_ptr + stride), iq_info);
+ const float a11 = dequantize_qasymm8(*(pixel_ptr + stride + 1), iq_info);
+
+ const float w1 = dx1 * dy1;
+ const float w2 = dx * dy1;
+ const float w3 = dx1 * dy;
+ const float w4 = dx * dy;
+ float res = a00 * w1 + a01 * w2 + a10 * w3 + a11 * w4;
+ return static_cast<uint8_t>(quantize_qasymm8(res, oq_info));
+}
+
+/** Computes bilinear interpolation for quantized input and output, using the pointer to the top-left pixel and the pixel's distance between
+ * the real coordinates and the smallest following integer coordinates. Input must be QASYMM8_SIGNED and in single channel format.
+ *
+ * @param[in] pixel_ptr Pointer to the top-left pixel value of a single channel input.
+ * @param[in] stride Stride to access the bottom-left and bottom-right pixel values
+ * @param[in] dx Pixel's distance between the X real coordinate and the smallest X following integer
+ * @param[in] dy Pixel's distance between the Y real coordinate and the smallest Y following integer
+ * @param[in] iq_info Input QuantizationInfo
+ * @param[in] oq_info Output QuantizationInfo
+ *
+ * @note dx and dy must be in the range [0, 1.0]
+ *
+ * @return The bilinear interpolated pixel value
+ */
+inline int8_t delta_bilinear_c1_quantized(const int8_t *pixel_ptr, size_t stride, float dx, float dy,
+ UniformQuantizationInfo iq_info, UniformQuantizationInfo oq_info)
+{
+ ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);
+
+ const float dx1 = 1.0f - dx;
+ const float dy1 = 1.0f - dy;
+
+ const float a00 = dequantize_qasymm8_signed(*pixel_ptr, iq_info);
+ const float a01 = dequantize_qasymm8_signed(*(pixel_ptr + 1), iq_info);
+ const float a10 = dequantize_qasymm8_signed(*(pixel_ptr + stride), iq_info);
+ const float a11 = dequantize_qasymm8_signed(*(pixel_ptr + stride + 1), iq_info);
+
+ const float w1 = dx1 * dy1;
+ const float w2 = dx * dy1;
+ const float w3 = dx1 * dy;
+ const float w4 = dx * dy;
+ float res = a00 * w1 + a01 * w2 + a10 * w3 + a11 * w4;
+ return static_cast<int8_t>(quantize_qasymm8_signed(res, oq_info));
+}
+
+/** Computes linear interpolation using the pointer to the top pixel and the pixel's distance between
+ * the real coordinates and the smallest following integer coordinates. Input must be in single channel format.
+ *
+ * @param[in] pixel_ptr Pointer to the top pixel value of a single channel input.
+ * @param[in] stride Stride to access the bottom pixel value
+ * @param[in] dy Pixel's distance between the Y real coordinate and the smallest Y following integer
+ *
+ * @note dy must be in the range [0, 1.0]
+ *
+ * @return The linear interpolated pixel value
+ */
+template <typename T>
+inline T delta_linear_c1_y(const T *pixel_ptr, size_t stride, float dy)
+{
+ ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);
+
+ const float dy1 = 1.0f - dy;
+
+ const T a00 = *pixel_ptr;
+ const T a10 = *(pixel_ptr + stride);
+
+ const float w1 = dy1;
+ const float w3 = dy;
+
+ return static_cast<T>(a00 * w1 + a10 * w3);
+}
+
+/** Computes linear interpolation using the pointer to the left pixel and the pixel's distance between
+ * the real coordinates and the smallest following integer coordinates. Input must be in single channel format.
+ *
+ * @param[in] pixel_ptr Pointer to the left pixel value of a single channel input.
+ * @param[in] dx Pixel's distance between the X real coordinate and the smallest X following integer
+ *
+ * @note dx must be in the range [0, 1.0]
+ *
+ * @return The linear interpolated pixel value
+ */
+template <typename T>
+inline T delta_linear_c1_x(const T *pixel_ptr, float dx)
+{
+ ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);
+
+ const T a00 = *pixel_ptr;
+ const T a01 = *(pixel_ptr + 1);
+
+ const float dx1 = 1.0f - dx;
+
+ const float w1 = dx1;
+ const float w2 = dx;
+
+ return static_cast<T>(a00 * w1 + a01 * w2);
+}
+
+/** Return the pixel at (x,y) using bilinear interpolation.
+ *
+ * @warning Only works if the iterator was created with an IImage
+ *
+ * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel input.
+ * @param[in] stride Stride in bytes of the image;
+ * @param[in] x X position of the wanted pixel
+ * @param[in] y Y position of the wanted pixel
+ *
+ * @return The pixel at (x, y) using bilinear interpolation.
+ */
+template <typename T>
+inline T pixel_bilinear_c1(const T *first_pixel_ptr, size_t stride, float x, float y)
+{
+ ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);
+
+ const int32_t xi = std::floor(x);
+ const int32_t yi = std::floor(y);
+
+ const float dx = x - xi;
+ const float dy = y - yi;
+
+ return delta_bilinear_c1(first_pixel_ptr + xi + yi * stride, stride, dx, dy);
+}
+
+/** Return the pixel at (x,y) using bilinear interpolation by clamping when out of borders. The image must be single channel input
+ *
+ * @warning Only works if the iterator was created with an IImage
+ *
+ * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel image.
+ * @param[in] stride Stride in bytes of the image
+ * @param[in] width Width of the image
+ * @param[in] height Height of the image
+ * @param[in] x X position of the wanted pixel
+ * @param[in] y Y position of the wanted pixel
+ *
+ * @return The pixel at (x, y) using bilinear interpolation.
+ */
+template <typename T>
+inline uint8_t
+pixel_bilinear_c1_clamp(const T *first_pixel_ptr, size_t stride, size_t width, size_t height, float x, float y)
+{
+ ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);
+
+ x = std::max(-1.f, std::min(x, static_cast<float>(width)));
+ y = std::max(-1.f, std::min(y, static_cast<float>(height)));
+
+ const float xi = std::floor(x);
+ const float yi = std::floor(y);
+
+ const float dx = x - xi;
+ const float dy = y - yi;
+
+ if(dx == 0.0f)
+ {
+ if(dy == 0.0f)
+ {
+ return static_cast<T>(first_pixel_ptr[static_cast<int32_t>(xi) + static_cast<int32_t>(yi) * stride]);
+ }
+ return delta_linear_c1_y(first_pixel_ptr + static_cast<int32_t>(xi) + static_cast<int32_t>(yi) * stride,
+ stride, dy);
+ }
+ if(dy == 0.0f)
+ {
+ return delta_linear_c1_x(first_pixel_ptr + static_cast<int32_t>(xi) + static_cast<int32_t>(yi) * stride,
+ dx);
+ }
+ return delta_bilinear_c1(first_pixel_ptr + static_cast<int32_t>(xi) + static_cast<int32_t>(yi) * stride, stride,
+ dx, dy);
+}
+
+/** Return the pixel at (x,y) using area interpolation by clamping when out of borders. The image must be single channel U8
+ *
+ * @note The interpolation area depends on the width and height ratio of the input and output images
+ * @note Currently the average of the contributing pixels is calculated
+ *
+ * @param[in] first_pixel_ptr Pointer to the first pixel of a single channel U8 image.
+ * @param[in] stride Stride in bytes of the image
+ * @param[in] width Width of the image
+ * @param[in] height Height of the image
+ * @param[in] wr Width ratio between the input image width and the output image width.
+ * @param[in] hr Height ratio between the input image height and the output image height.
+ * @param[in] x X position of the wanted pixel
+ * @param[in] y Y position of the wanted pixel
+ *
+ * @return The pixel at (x, y) using area interpolation.
+ */
+inline uint8_t
+pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr,
+ float hr, int x, int y)
+{
+ ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);
+
+ // Calculate sampling position
+ float in_x = (x + 0.5f) * wr - 0.5f;
+ float in_y = (y + 0.5f) * hr - 0.5f;
+
+ // Get bounding box offsets
+ int x_from = std::floor(x * wr - 0.5f - in_x);
+ int y_from = std::floor(y * hr - 0.5f - in_y);
+ int x_to = std::ceil((x + 1) * wr - 0.5f - in_x);
+ int y_to = std::ceil((y + 1) * hr - 0.5f - in_y);
+
+ // Clamp position to borders
+ in_x = std::max(-1.f, std::min(in_x, static_cast<float>(width)));
+ in_y = std::max(-1.f, std::min(in_y, static_cast<float>(height)));
+
+ // Clamp bounding box offsets to borders
+ x_from = ((in_x + x_from) < -1) ? -1 : x_from;
+ y_from = ((in_y + y_from) < -1) ? -1 : y_from;
+ x_to = ((in_x + x_to) > width) ? (width - in_x) : x_to;
+ y_to = ((in_y + y_to) > height) ? (height - in_y) : y_to;
+
+ // Get pixel index
+ const int xi = std::floor(in_x);
+ const int yi = std::floor(in_y);
+
+ // Bounding box elements in each dimension
+ const int x_elements = (x_to - x_from + 1);
+ const int y_elements = (y_to - y_from + 1);
+ ARM_COMPUTE_ERROR_ON(x_elements == 0 || y_elements == 0);
+
+ // Sum pixels in area
+ int sum = 0;
+ for(int j = yi + y_from, je = yi + y_to; j <= je; ++j)
+ {
+ const uint8_t *ptr = first_pixel_ptr + j * stride + xi + x_from;
+ sum = std::accumulate(ptr, ptr + x_elements, sum);
+ }
+
+ // Return average
+ return sum / (x_elements * y_elements);
+}
+} // namespace scale_helpers
+} // namespace arm_compute
+
+#endif /* SRC_CORE_HELPERS_SCALEHELPERS_H */
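A worked example of the bilinear weights used by delta_bilinear_c1, as a self-contained sketch; the 2x2 patch values are arbitrary:

    #include "src/core/helpers/ScaleHelpers.h"

    float bilinear_example()
    {
        // 2x2 single-channel patch stored row-major, so the row stride is 2.
        const float patch[4] = { 0.f, 4.f, 8.f, 12.f };
        // With dx = dy = 0.25 the weights are (1-dx)(1-dy) = 0.5625, dx(1-dy) = 0.1875,
        // (1-dx)dy = 0.1875 and dx*dy = 0.0625, so the result is
        // 0*0.5625 + 4*0.1875 + 8*0.1875 + 12*0.0625 = 3.
        return arm_compute::scale_helpers::delta_bilinear_c1(patch, 2, 0.25f, 0.25f);
    }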
diff --git a/src/core/helpers/SoftmaxHelpers.cpp b/src/core/helpers/SoftmaxHelpers.cpp
new file mode 100644
index 0000000000..71b971af31
--- /dev/null
+++ b/src/core/helpers/SoftmaxHelpers.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017-2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/helpers/SoftmaxHelpers.h"
+
+namespace arm_compute
+{
+namespace softmax_helpers
+{
+PermutationVector get_permutation_vector_from_softmax_axis(size_t axis)
+{
+ switch(axis)
+ {
+ case 1:
+ return PermutationVector(1U, 0U, 2U, 3U);
+ case 2:
+ return PermutationVector(2U, 1U, 0U, 3U);
+ case 3:
+ return PermutationVector(3U, 1U, 2U, 0U);
+ default:
+ ARM_COMPUTE_ERROR("Axis not supported");
+ }
+}
+} // namespace softmax_helpers
+} // namespace arm_compute
diff --git a/src/core/helpers/SoftmaxHelpers.h b/src/core/helpers/SoftmaxHelpers.h
new file mode 100644
index 0000000000..de5490a14d
--- /dev/null
+++ b/src/core/helpers/SoftmaxHelpers.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_HELPERS_SOFTMAXHELPERS_H
+#define SRC_CORE_HELPERS_SOFTMAXHELPERS_H
+
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace softmax_helpers
+{
+/** Given a softmax axis, this function returns the permutation vector required to put the axis to the front
+ *
+ * @note This function assumes a tensor rank <= 4
+ *
+ * Axis selects the dimension on which softmax is performed.
+ * E.g. For input of shape 4x5x6 and axis=1, softmax will be applied to 4x6=24 vectors of size 5.
+ * Internally, softmax is always performed on the first (front) dimension; therefore, a permutation
+ * is required to put the dimension specified by @p axis in front.
+ *
+ * @param[in] axis Axis on which to perform softmax. Supported: 1, 2, 3 (0 implies no permutation needed)
+ *
+ * @return the permutation vector
+ */
+PermutationVector get_permutation_vector_from_softmax_axis(size_t axis);
+} // namespace softmax_helpers
+} // namespace arm_compute
+
+#endif /* SRC_CORE_HELPERS_SOFTMAXHELPERS_H */
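To illustrate the mapping documented above, a short sketch following the 4x5x6 example in the comment:

    #include "src/core/helpers/SoftmaxHelpers.h"

    // For axis = 2 the returned vector is (2, 1, 0, 3): dimensions 0 and 2 are swapped,
    // so a tensor of shape [4, 5, 6] is rearranged to [6, 5, 4] and the softmax kernel
    // then operates along the (new) front dimension of size 6.
    const arm_compute::PermutationVector perm =
        arm_compute::softmax_helpers::get_permutation_vector_from_softmax_axis(2);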
diff --git a/src/core/helpers/Utils.h b/src/core/helpers/Utils.h
new file mode 100644
index 0000000000..3c3b2b93f9
--- /dev/null
+++ b/src/core/helpers/Utils.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_HELPERS_UTILS_H
+#define SRC_CORE_HELPERS_UTILS_H
+
+#include "arm_compute/core/ITensorInfo.h"
+
+namespace arm_compute
+{
+/** Create a strides object based on the provided strides and the tensor dimensions.
+ *
+ * @param[in] info Tensor info object providing the shape of the tensor for unspecified strides.
+ * @param[in] stride_x Stride to be used in X dimension (in bytes).
+ * @param[in] fixed_strides Strides to be used in higher dimensions starting at Y (in bytes).
+ *
+ * @return Strides object based on the specified strides. Missing strides are
+ * calculated based on the tensor shape and the strides of lower dimensions.
+ */
+template <typename T, typename... Ts>
+inline Strides compute_strides(const ITensorInfo &info, T stride_x, Ts &&... fixed_strides)
+{
+ const TensorShape &shape = info.tensor_shape();
+
+ // Create strides object
+ Strides strides(stride_x, fixed_strides...);
+
+ for(size_t i = 1 + sizeof...(Ts); i < info.num_dimensions(); ++i)
+ {
+ strides.set(i, shape[i - 1] * strides[i - 1]);
+ }
+
+ return strides;
+}
+
+/** Create a strides object based on the tensor dimensions.
+ *
+ * @param[in] info Tensor info object used to compute the strides.
+ *
+ * @return Strides object based on element size and tensor shape.
+ */
+template <typename... Ts>
+inline Strides compute_strides(const ITensorInfo &info)
+{
+ return compute_strides(info, info.element_size());
+}
+
+/** Given an integer value, this function returns the next power of two
+ *
+ * @param[in] x Input value
+ *
+ * @return the next power of two
+ */
+inline unsigned int get_next_power_two(unsigned int x)
+{
+ // Decrement by 1
+ x--;
+
+ // Shift right by 1
+ x |= x >> 1u;
+ // Shift right by 2
+ x |= x >> 2u;
+ // Shift right by 4
+ x |= x >> 4u;
+ // Shift right by 8
+ x |= x >> 8u;
+ // Shift right by 16
+ x |= x >> 16u;
+
+ // Increment by 1
+ x++;
+
+ return x;
+}
+} // namespace arm_compute
+
+#endif /* SRC_CORE_HELPERS_UTILS_H */
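A small sketch of what get_next_power_two produces for a few inputs; the values follow from the bit-twiddling above, and the check_next_power_two name is illustrative only:

    #include "src/core/helpers/Utils.h"
    #include <cassert>

    void check_next_power_two()
    {
        assert(arm_compute::get_next_power_two(1)  == 1);  // already a power of two
        assert(arm_compute::get_next_power_two(5)  == 8);  // rounded up
        assert(arm_compute::get_next_power_two(17) == 32); // rounded up past 16
    }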
diff --git a/src/core/helpers/WindowHelpers.cpp b/src/core/helpers/WindowHelpers.cpp
new file mode 100644
index 0000000000..ba10eb9775
--- /dev/null
+++ b/src/core/helpers/WindowHelpers.cpp
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/helpers/WindowHelpers.h"
+
+namespace arm_compute
+{
+Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
+{
+ if(!skip_border)
+ {
+ border_size = BorderSize(0);
+ }
+
+ const Coordinates &anchor = valid_region.anchor;
+ const TensorShape &shape = valid_region.shape;
+
+ Window window;
+
+ window.set(0, Window::Dimension(
+ // Skip the border left of the image
+ anchor[0] + border_size.left,
+ // Skip the border right of the image
+ // Make sure the window width is a multiple of the step size
+ anchor[0] + border_size.left + ceil_to_multiple(std::max(0, static_cast<int>(shape[0]) - static_cast<int>(border_size.left) - static_cast<int>(border_size.right)), steps[0]),
+ steps[0]));
+
+ size_t n = 1;
+
+ if(anchor.num_dimensions() > 1)
+ {
+ window.set(1, Window::Dimension(
+ // Skip the border above the image
+ anchor[1] + border_size.top,
+ // Skip the border below the image
+ anchor[1] + border_size.top + ceil_to_multiple(std::max(0, static_cast<int>(shape[1]) - static_cast<int>(border_size.top) - static_cast<int>(border_size.bottom)), steps[1]),
+ steps[1]));
+
+ ++n;
+ }
+
+ if(anchor.num_dimensions() > 2)
+ {
+ window.set(2, Window::Dimension(anchor[2], std::max<size_t>(1, shape[2]), steps[2]));
+
+ ++n;
+ }
+
+ for(; n < anchor.num_dimensions(); ++n)
+ {
+ window.set(n, Window::Dimension(anchor[n], std::max<size_t>(1, shape[n])));
+ }
+
+ for(; n < Coordinates::num_max_dimensions; ++n)
+ {
+ window.set(n, Window::Dimension(0, 1));
+ }
+
+ return window;
+}
+
+Window calculate_max_enlarged_window(const ValidRegion &valid_region, const Steps &steps, BorderSize border_size)
+{
+ const Coordinates &anchor = valid_region.anchor;
+ const TensorShape &shape = valid_region.shape;
+
+ Window window;
+
+ window.set(0, Window::Dimension(
+ // move the anchor to the start from the border
+ anchor[0] - border_size.left,
+ // move the anchor to include the right end border
+ // Make sure the window width is a multiple of the step size
+ anchor[0] - border_size.left + ceil_to_multiple(shape[0] + border_size.left + border_size.right, steps[0]),
+ steps[0]));
+
+ size_t n = 1;
+
+ if(anchor.num_dimensions() > 1)
+ {
+ window.set(1, Window::Dimension(
+ // Include the border above the image
+ anchor[1] - border_size.top,
+ // Include the border below the image
+ anchor[1] - border_size.top + ceil_to_multiple(shape[1] + border_size.top + border_size.bottom, steps[1]),
+ steps[1]));
+
+ ++n;
+ }
+
+ if(anchor.num_dimensions() > 2)
+ {
+ window.set(2, Window::Dimension(0, std::max<size_t>(1, shape[n]), steps[2]));
+
+ ++n;
+ }
+
+ for(; n < anchor.num_dimensions(); ++n)
+ {
+ window.set(n, Window::Dimension(anchor[n], std::max<size_t>(1, shape[n])));
+ }
+
+ for(; n < Coordinates::num_max_dimensions; ++n)
+ {
+ window.set(n, Window::Dimension(0, 1));
+ }
+
+ return window;
+}
+
+Window calculate_max_window_horizontal(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
+{
+ if(skip_border)
+ {
+ border_size.top = 0;
+ border_size.bottom = 0;
+ }
+ else
+ {
+ border_size.left = 0;
+ border_size.right = 0;
+ }
+
+ const Coordinates &anchor = valid_region.anchor;
+ const TensorShape &shape = valid_region.shape;
+
+ Window window;
+
+ window.set(0, Window::Dimension(
+ // Skip the border left of the image
+ anchor[0] + border_size.left,
+ // Skip the border right of the image
+ // Make sure the window width is a multiple of the step size
+ anchor[0] + border_size.left + ceil_to_multiple(std::max(0, static_cast<int>(shape[0]) - static_cast<int>(border_size.left) - static_cast<int>(border_size.right)), steps[0]),
+ steps[0]));
+
+ size_t n = 1;
+
+ if(anchor.num_dimensions() > 1)
+ {
+ window.set(1, Window::Dimension(
+ // Skip the border above the image
+ anchor[1] - border_size.top,
+ // Skip the border below the image
+ anchor[1] + shape[1] + border_size.bottom,
+ 1));
+
+ ++n;
+ }
+
+ for(; n < anchor.num_dimensions(); ++n)
+ {
+ window.set(n, Window::Dimension(anchor[n], std::max<size_t>(1, shape[n])));
+ }
+
+ for(; n < Coordinates::num_max_dimensions; ++n)
+ {
+ window.set(n, Window::Dimension(0, 1));
+ }
+
+ return window;
+}
+} // namespace arm_compute
diff --git a/src/core/helpers/WindowHelpers.h b/src/core/helpers/WindowHelpers.h
new file mode 100644
index 0000000000..9bc2135b6d
--- /dev/null
+++ b/src/core/helpers/WindowHelpers.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2020 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CORE_HELPERS_WINDOWHELPERS_H
+#define SRC_CORE_HELPERS_WINDOWHELPERS_H
+
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/Steps.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+/** Update window and padding size for each of the access patterns.
+ *
+ * First the window size is reduced based on all access patterns that are not
+ * allowed to modify the padding of the underlying tensor. Then the padding of
+ * the remaining tensors is increased to match the window.
+ *
+ * @param[in] win Window that is used by the kernel.
+ * @param[in] patterns Access patterns used to calculate the final window and padding.
+ *
+ * @return True if the window has been changed. Changes to the padding do not
+ * influence the returned value.
+ */
+template <typename... Ts>
+bool update_window_and_padding(Window &win, Ts &&... patterns)
+{
+ bool window_changed = false;
+
+ utility::for_each([&](const IAccessWindow & w)
+ {
+ window_changed |= w.update_window_if_needed(win);
+ },
+ patterns...);
+
+ bool padding_changed = false;
+
+ utility::for_each([&](IAccessWindow & w)
+ {
+ padding_changed |= w.update_padding_if_needed(win);
+ },
+ patterns...);
+
+ return window_changed;
+}
+
+/** Intersect multiple valid regions.
+ *
+ * @param[in] regions Valid regions.
+ *
+ * @return Intersection of all regions.
+ */
+template <typename... Ts>
+ValidRegion intersect_valid_regions(const Ts &... regions)
+{
+ auto intersect = [](const ValidRegion & r1, const ValidRegion & r2) -> ValidRegion
+ {
+ ValidRegion region;
+
+ for(size_t d = 0; d < std::min(r1.anchor.num_dimensions(), r2.anchor.num_dimensions()); ++d)
+ {
+ region.anchor.set(d, std::max(r1.anchor[d], r2.anchor[d]));
+ }
+
+ for(size_t d = 0; d < std::min(r1.shape.num_dimensions(), r2.shape.num_dimensions()); ++d)
+ {
+ region.shape.set(d, std::min(r1.shape[d], r2.shape[d]));
+ }
+
+ return region;
+ };
+
+ return utility::foldl(intersect, regions...);
+}
+
+#ifndef DOXYGEN_SKIP_THIS
+/** Calculate the maximum window for a given tensor shape and border setting
+ *
+ * @param[in] valid_region Valid region object defining the shape of the tensor space for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] skip_border (Optional) If true exclude the border region from the window.
+ * @param[in] border_size (Optional) Border size.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
+
+/** Calculate the maximum window for a given tensor shape and border setting
+ *
+ * @param[in] info Tensor info object defining the shape of the object for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] skip_border (Optional) If true exclude the border region from the window.
+ * @param[in] border_size (Optional) Border size.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+inline Window calculate_max_window(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize())
+{
+ return calculate_max_window(info.valid_region(), steps, skip_border, border_size);
+}
+
+/** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting
+ *
+ * @param[in] valid_region Valid region object defining the shape of the tensor space for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] skip_border (Optional) If true exclude the border region from the window.
+ * @param[in] border_size (Optional) Border size. The border region will be excluded from the window.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+Window calculate_max_window_horizontal(const ValidRegion &valid_region, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
+
+/** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting
+ *
+ * @param[in] info Tensor info object defining the shape of the object for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] skip_border (Optional) If true exclude the border region from the window.
+ * @param[in] border_size (Optional) Border size.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+inline Window calculate_max_window_horizontal(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize())
+{
+ return calculate_max_window_horizontal(info.valid_region(), steps, skip_border, border_size);
+}
+
+/** Calculate the maximum window for a given tensor shape and border setting. The window will also include the border.
+ *
+ * @param[in] valid_region Valid region object defining the shape of the tensor space for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] border_size (Optional) Border size. The border region will be included in the window.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+Window calculate_max_enlarged_window(const ValidRegion &valid_region, const Steps &steps = Steps(), BorderSize border_size = BorderSize());
+
+/** Calculate the maximum window for a given tensor shape and border setting. The window will also include the border.
+ *
+ * @param[in] info Tensor info object defining the shape of the object for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] border_size (Optional) Border size. The border region will be included in the window.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+inline Window calculate_max_enlarged_window(const ITensorInfo &info, const Steps &steps = Steps(), BorderSize border_size = BorderSize())
+{
+ return calculate_max_enlarged_window(info.valid_region(), steps, border_size);
+}
+#endif /* DOXYGEN_SKIP_THIS */
+} // namespace arm_compute
+
+#endif /* SRC_CORE_HELPERS_WINDOWHELPERS_H */
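Typical use of calculate_max_window from a kernel configure step, processing 16 elements per iteration along X, as a hedged sketch; the helper rounds the X end up to a multiple of the step via ceil_to_multiple, as shown in WindowHelpers.cpp above. The make_execution_window name is illustrative only.

    #include "arm_compute/core/ITensorInfo.h"
    #include "src/core/helpers/WindowHelpers.h"

    arm_compute::Window make_execution_window(const arm_compute::ITensorInfo &output)
    {
        constexpr unsigned int elements_per_iteration = 16;
        // No border handling here: skip_border defaults to false and border_size to zero.
        return arm_compute::calculate_max_window(output, arm_compute::Steps(elements_per_iteration));
    }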