aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDiego Lopez Recas <Diego.LopezRecas@arm.com>2017-12-18 11:28:27 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:43:10 +0000
commitbcbc970f1f5b47f7314a5ad078820bc8a5edca94 (patch)
tree6bca70896b993adc80aea5e652d9e8dcefff3d8d
parent1bfc7849950b67aeee382b08f27fd0b1b5ef0587 (diff)
downloadComputeLibrary-bcbc970f1f5b47f7314a5ad078820bc8a5edca94.tar.gz
IVGCVSW-863 calculate_max_window..() family takes ValidRegion
Change-Id: I91e39713ffa580e9d2213988ad3517a8a41bf4e8 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/114013 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--arm_compute/core/Helpers.h51
-rw-r--r--arm_compute/core/Helpers.inl21
-rw-r--r--arm_compute/core/Types.h14
-rw-r--r--src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp4
-rw-r--r--src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp4
-rw-r--r--src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp2
-rw-r--r--src/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.cpp2
-rw-r--r--src/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.cpp4
-rw-r--r--src/core/Helpers.cpp68
-rw-r--r--src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp6
-rw-r--r--src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp4
-rw-r--r--src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp4
-rw-r--r--src/core/SubTensorInfo.cpp6
-rw-r--r--src/core/TensorInfo.cpp14
-rw-r--r--tests/Utils.h52
17 files changed, 158 insertions, 106 deletions
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index 3575fcf1b9..e01e4baa6b 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -361,6 +361,17 @@ bool update_window_and_padding(Window &win, Ts &&... patterns)
/** Calculate the maximum window for a given tensor shape and border setting
*
+ * @param[in] valid_region Valid region object defining the shape of the tensor space for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] skip_border (Optional) If true exclude the border region from the window.
+ * @param[in] border_size (Optional) Border size.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+Window calculate_max_window(const ValidRegion &valid_region, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
+
+/** Calculate the maximum window for a given tensor shape and border setting
+ *
* @param[in] info Tensor info object defining the shape of the object for which the window is created.
* @param[in] steps (Optional) Number of elements processed for each step.
* @param[in] skip_border (Optional) If true exclude the border region from the window.
@@ -368,18 +379,45 @@ bool update_window_and_padding(Window &win, Ts &&... patterns)
*
* @return The maximum window the kernel can be executed on.
*/
-Window calculate_max_window(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
+inline Window calculate_max_window(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize())
+{
+ return calculate_max_window(info.valid_region(), steps, skip_border, border_size);
+}
+
+/** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting
+ *
+ * @param[in] valid_region Valid region object defining the shape of the tensor space for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] skip_border (Optional) If true exclude the border region from the window.
+ * @param[in] border_size (Optional) Border size. The border region will be excluded from the window.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+Window calculate_max_window_horizontal(const ValidRegion &valid_region, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
/** Calculate the maximum window used by a horizontal kernel for a given tensor shape and border setting
*
* @param[in] info Tensor info object defining the shape of the object for which the window is created.
* @param[in] steps (Optional) Number of elements processed for each step.
* @param[in] skip_border (Optional) If true exclude the border region from the window.
- * @param[in] border_size (Optional) Border size. The border region will be excluded from the window.
+ * @param[in] border_size (Optional) Border size.
+ *
+ * @return The maximum window the kernel can be executed on.
+ */
+inline Window calculate_max_window_horizontal(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize())
+{
+ return calculate_max_window_horizontal(info.valid_region(), steps, skip_border, border_size);
+}
+
+/** Calculate the maximum window for a given tensor shape and border setting. The window will also include the border.
+ *
+ * @param[in] valid_region Valid region object defining the shape of the tensor space for which the window is created.
+ * @param[in] steps (Optional) Number of elements processed for each step.
+ * @param[in] border_size (Optional) Border size. The border region will be included in the window.
*
* @return The maximum window the kernel can be executed on.
*/
-Window calculate_max_window_horizontal(const ITensorInfo &info, const Steps &steps = Steps(), bool skip_border = false, BorderSize border_size = BorderSize());
+Window calculate_max_enlarged_window(const ValidRegion &valid_region, const Steps &steps = Steps(), BorderSize border_size = BorderSize());
/** Calculate the maximum window for a given tensor shape and border setting. The window will also include the border.
*
@@ -389,7 +427,10 @@ Window calculate_max_window_horizontal(const ITensorInfo &info, const Steps &ste
*
* @return The maximum window the kernel can be executed on.
*/
-Window calculate_max_enlarged_window(const ITensorInfo &info, const Steps &steps = Steps(), BorderSize border_size = BorderSize());
+inline Window calculate_max_enlarged_window(const ITensorInfo &info, const Steps &steps = Steps(), BorderSize border_size = BorderSize())
+{
+ return calculate_max_enlarged_window(info.valid_region(), steps, border_size);
+}
/** Intersect multiple valid regions.
*
diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index 4121fb1e8d..6d0f8b0104 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -284,11 +284,14 @@ inline bool set_quantization_info_if_empty(ITensorInfo &info, QuantizationInfo q
inline ValidRegion calculate_valid_region_scale(const ITensorInfo &src_info, const TensorShape &dst_shape, InterpolationPolicy policy, BorderSize border_size, bool border_undefined)
{
- const auto wr = static_cast<float>(dst_shape[0]) / static_cast<float>(src_info.tensor_shape()[0]);
- const auto hr = static_cast<float>(dst_shape[1]) / static_cast<float>(src_info.tensor_shape()[1]);
- Coordinates anchor;
- anchor.set_num_dimensions(src_info.tensor_shape().num_dimensions());
- TensorShape new_dst_shape(dst_shape);
+ const auto wr = static_cast<float>(dst_shape[0]) / static_cast<float>(src_info.tensor_shape()[0]);
+ const auto hr = static_cast<float>(dst_shape[1]) / static_cast<float>(src_info.tensor_shape()[1]);
+
+ ValidRegion valid_region{ Coordinates(), dst_shape, src_info.tensor_shape().num_dimensions() };
+
+ Coordinates &anchor = valid_region.anchor;
+ TensorShape &shape = valid_region.shape;
+
anchor.set(0, (policy == InterpolationPolicy::BILINEAR
&& border_undefined) ?
((static_cast<int>(src_info.valid_region().anchor[0]) + border_size.left + 0.5f) * wr - 0.5f) :
@@ -306,10 +309,10 @@ inline ValidRegion calculate_valid_region_scale(const ITensorInfo &src_info, con
((static_cast<int>(src_info.valid_region().anchor[1]) + static_cast<int>(src_info.valid_region().shape[1]) - 1) - 1 + 0.5f) * hr - 0.5f :
((static_cast<int>(src_info.valid_region().anchor[1]) + static_cast<int>(src_info.valid_region().shape[1])) + 0.5f) * hr - 0.5f;
- new_dst_shape.set(0, shape_out_x - anchor[0]);
- new_dst_shape.set(1, shape_out_y - anchor[1]);
+ shape.set(0, shape_out_x - anchor[0]);
+ shape.set(1, shape_out_y - anchor[1]);
- return ValidRegion(std::move(anchor), std::move(new_dst_shape));
+ return valid_region;
}
inline Coordinates index2coords(const TensorShape &shape, int index)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 538449b40a..5402e358b5 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -165,9 +165,17 @@ struct ValidRegion
ValidRegion &operator=(ValidRegion &&) = default;
~ValidRegion() = default;
- ValidRegion(Coordinates anchor, TensorShape shape)
- : anchor{ anchor }, shape{ shape }
+ ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape)
+ : anchor{ an_anchor }, shape{ a_shape }
{
+ anchor.set_num_dimensions(std::max(anchor.num_dimensions(), shape.num_dimensions()));
+ }
+
+ ValidRegion(const Coordinates &an_anchor, const TensorShape &a_shape, size_t num_dimensions)
+ : anchor{ an_anchor }, shape{ a_shape }
+ {
+ ARM_COMPUTE_ERROR_ON(num_dimensions < std::max(anchor.num_dimensions(), shape.num_dimensions()));
+ anchor.set_num_dimensions(num_dimensions);
}
/** Return the start of the valid region for the given dimension @p d */
diff --git a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
index 0275d4fd83..9b30c64130 100644
--- a/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
+++ b/src/core/CL/kernels/CLDepthConcatenateLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -103,7 +103,7 @@ void CLDepthConcatenateLayerKernel::configure(const ICLTensor *input, unsigned i
AccessWindowRectangle input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure(win);
}
diff --git a/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp
index 508fb899f1..a3af5b00ba 100644
--- a/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLLocallyConnectedMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,7 +88,7 @@ void CLLocallyConnectedMatrixMultiplyKernel::configure(const ICLTensor *input0,
update_window_and_padding(win, input0_access, input1_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
ICLKernel::configure(win);
}
diff --git a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
index 1aac2502e7..4eceab8266 100644
--- a/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCDepthConcatenateLayerKernel.cpp
@@ -106,7 +106,7 @@ void GCDepthConcatenateLayerKernel::configure(const IGCTensor *input, unsigned i
AccessWindowRectangle input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
IGCKernel::configure(win);
}
diff --git a/src/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.cpp
index 32fbbfeefb..a5f09e8eac 100644
--- a/src/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCGEMMMatrixMultiplyKernel.cpp
@@ -106,7 +106,7 @@ void GCGEMMMatrixMultiplyKernel::configure(const IGCTensor *input0, const IGCTen
update_window_and_padding(win, input0_access, input1_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
}
else
{
diff --git a/src/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.cpp
index c361b60f84..a78446e074 100644
--- a/src/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCGEMMTranspose1xWKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -90,7 +90,7 @@ void GCGEMMTranspose1xWKernel::configure(const IGCTensor *input, IGCTensor *outp
update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), input->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), input->info()->tensor_shape()));
IGCKernel::configure(win);
}
diff --git a/src/core/Helpers.cpp b/src/core/Helpers.cpp
index 151d7de9a4..3ee0fa7321 100644
--- a/src/core/Helpers.cpp
+++ b/src/core/Helpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,25 +23,17 @@
*/
#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/IKernel.h"
-#include "arm_compute/core/ITensorInfo.h"
-#include "arm_compute/core/Utils.h"
-
-#include <algorithm>
-#include <cstdint>
-
using namespace arm_compute;
-Window arm_compute::calculate_max_window(const ITensorInfo &info, const Steps &steps, bool skip_border, BorderSize border_size)
+Window arm_compute::calculate_max_window(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
{
if(!skip_border)
{
border_size = BorderSize(0);
}
- const Coordinates &anchor = info.valid_region().anchor;
- const TensorShape &shape = info.valid_region().shape;
+ const Coordinates &anchor = valid_region.anchor;
+ const TensorShape &shape = valid_region.shape;
Window window;
@@ -53,10 +45,9 @@ Window arm_compute::calculate_max_window(const ITensorInfo &info, const Steps &s
anchor[0] + border_size.left + ceil_to_multiple(std::max(0, static_cast<int>(shape[0]) - static_cast<int>(border_size.left) - static_cast<int>(border_size.right)), steps[0]),
steps[0]));
- size_t n = 1;
- const TensorShape &tensor_shape = info.tensor_shape();
+ size_t n = 1;
- if(tensor_shape.num_dimensions() > 1)
+ if(anchor.num_dimensions() > 1)
{
window.set(1, Window::Dimension(
// Skip the border above the image
@@ -68,18 +59,23 @@ Window arm_compute::calculate_max_window(const ITensorInfo &info, const Steps &s
++n;
}
+ for(; n < anchor.num_dimensions(); ++n)
+ {
+ window.set(n, Window::Dimension(anchor[n], std::max<size_t>(1, shape[n])));
+ }
+
for(; n < Coordinates::num_max_dimensions; ++n)
{
- window.set(n, Window::Dimension(0, std::max<size_t>(1, tensor_shape[n])));
+ window.set(n, Window::Dimension(0, 1));
}
return window;
}
-Window arm_compute::calculate_max_enlarged_window(const ITensorInfo &info, const Steps &steps, BorderSize border_size)
+Window arm_compute::calculate_max_enlarged_window(const ValidRegion &valid_region, const Steps &steps, BorderSize border_size)
{
- const Coordinates &anchor = info.valid_region().anchor;
- const TensorShape &shape = info.valid_region().shape;
+ const Coordinates &anchor = valid_region.anchor;
+ const TensorShape &shape = valid_region.shape;
Window window;
@@ -91,10 +87,9 @@ Window arm_compute::calculate_max_enlarged_window(const ITensorInfo &info, const
anchor[0] - border_size.left + ceil_to_multiple(shape[0] + border_size.left + border_size.right, steps[0]),
steps[0]));
- size_t n = 1;
- const TensorShape &tensor_shape = info.tensor_shape();
+ size_t n = 1;
- if(tensor_shape.num_dimensions() > 1)
+ if(anchor.num_dimensions() > 1)
{
window.set(1, Window::Dimension(
// Include the border above the image
@@ -106,22 +101,27 @@ Window arm_compute::calculate_max_enlarged_window(const ITensorInfo &info, const
++n;
}
- if(tensor_shape.num_dimensions() > 2)
+ if(anchor.num_dimensions() > 2)
{
- window.set(2, Window::Dimension(0, std::max<size_t>(1, tensor_shape[n]), steps[2]));
+ window.set(2, Window::Dimension(0, std::max<size_t>(1, shape[n]), steps[2]));
++n;
}
+ for(; n < anchor.num_dimensions(); ++n)
+ {
+ window.set(n, Window::Dimension(anchor[n], std::max<size_t>(1, shape[n])));
+ }
+
for(; n < Coordinates::num_max_dimensions; ++n)
{
- window.set(n, Window::Dimension(0, std::max<size_t>(1, tensor_shape[n])));
+ window.set(n, Window::Dimension(0, 1));
}
return window;
}
-Window arm_compute::calculate_max_window_horizontal(const ITensorInfo &info, const Steps &steps, bool skip_border, BorderSize border_size)
+Window arm_compute::calculate_max_window_horizontal(const ValidRegion &valid_region, const Steps &steps, bool skip_border, BorderSize border_size)
{
if(skip_border)
{
@@ -134,8 +134,8 @@ Window arm_compute::calculate_max_window_horizontal(const ITensorInfo &info, con
border_size.right = 0;
}
- const Coordinates &anchor = info.valid_region().anchor;
- const TensorShape &shape = info.valid_region().shape;
+ const Coordinates &anchor = valid_region.anchor;
+ const TensorShape &shape = valid_region.shape;
Window window;
@@ -147,10 +147,9 @@ Window arm_compute::calculate_max_window_horizontal(const ITensorInfo &info, con
anchor[0] + border_size.left + ceil_to_multiple(std::max(0, static_cast<int>(shape[0]) - static_cast<int>(border_size.left) - static_cast<int>(border_size.right)), steps[0]),
steps[0]));
- size_t n = 1;
- const TensorShape &tensor_shape = info.tensor_shape();
+ size_t n = 1;
- if(tensor_shape.num_dimensions() > 1)
+ if(anchor.num_dimensions() > 1)
{
window.set(1, Window::Dimension(
// Skip the border above the image
@@ -162,9 +161,14 @@ Window arm_compute::calculate_max_window_horizontal(const ITensorInfo &info, con
++n;
}
+ for(; n < anchor.num_dimensions(); ++n)
+ {
+ window.set(n, Window::Dimension(anchor[n], std::max<size_t>(1, shape[n])));
+ }
+
for(; n < Coordinates::num_max_dimensions; ++n)
{
- window.set(n, Window::Dimension(0, std::max<size_t>(1, tensor_shape[n])));
+ window.set(n, Window::Dimension(0, 1));
}
return window;
diff --git a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
index 01b0f10f70..891a03c5cc 100644
--- a/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConcatenateLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -154,7 +154,7 @@ void NEDepthConcatenateLayerKernel::configure(const ITensor *input, unsigned int
AccessWindowRectangle input_access(input->info(), -_left_right, -_top_bottom, num_elems_read_per_iteration, num_rows_read_per_iteration);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
INEKernel::configure(win);
}
diff --git a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
index 9104f0b98a..a100cd2bf6 100644
--- a/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -786,7 +786,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input0, ITe
window_changed = update_window_and_padding(win, in0_access, in1_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
}
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
diff --git a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
index 0aadfc941c..c1ee770db5 100644
--- a/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMLowpReductionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,7 +64,7 @@ std::pair<Status, Window> validate_and_configure_window_matrix_a_reduction(ITens
bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
@@ -90,7 +90,7 @@ std::pair<Status, Window> validate_and_configure_window_matrix_b_reduction(ITens
bool window_changed = update_window_and_padding(win, input_access, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
return std::make_pair(err, win);
diff --git a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
index a88dc655c1..5d6163d583 100644
--- a/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMTranspose1xWKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -89,7 +89,7 @@ std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITen
{
AccessWindowTranspose output_access(output, 0, 0, num_elems_processed_per_iteration, 1, scale_x, 1.f / scale_x);
window_changed = window_changed || update_window_and_padding(win, output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), input->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), input->tensor_shape()));
}
Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
diff --git a/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp
index 52e30066de..58da0402bc 100644
--- a/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NELocallyConnectedMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -333,7 +333,7 @@ void NELocallyConnectedMatrixMultiplyKernel::configure(const ITensor *input0, co
AccessWindowHorizontal(input1->info(), 0, num_elems_processed_per_iteration_x),
output_access);
- output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->info()->tensor_shape()));
INEKernel::configure(win);
}
diff --git a/src/core/SubTensorInfo.cpp b/src/core/SubTensorInfo.cpp
index 8acd71ceb0..7a4886ff60 100644
--- a/src/core/SubTensorInfo.cpp
+++ b/src/core/SubTensorInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,9 +46,7 @@ SubTensorInfo::SubTensorInfo(ITensorInfo *parent, TensorShape tensor_shape, Coor
}
// Initialize valid region
- Coordinates coordinates;
- coordinates.set_num_dimensions(_tensor_shape.num_dimensions());
- _valid_region = ValidRegion{ coordinates, _tensor_shape };
+ _valid_region = ValidRegion{ Coordinates(), _tensor_shape };
}
std::unique_ptr<ITensorInfo> SubTensorInfo::clone() const
diff --git a/src/core/TensorInfo.cpp b/src/core/TensorInfo.cpp
index 60e76bf84a..2008217c85 100644
--- a/src/core/TensorInfo.cpp
+++ b/src/core/TensorInfo.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016, 2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -159,9 +159,7 @@ void TensorInfo::init(const TensorShape &tensor_shape, size_t num_channels, Data
_strides_in_bytes = strides_in_bytes;
_total_size = total_size_in_bytes;
- Coordinates coordinates;
- coordinates.set_num_dimensions(_tensor_shape.num_dimensions());
- _valid_region = ValidRegion{ coordinates, _tensor_shape };
+ _valid_region = ValidRegion{ Coordinates(), _tensor_shape };
}
void TensorInfo::init(const HOGInfo &hog_info, unsigned int width, unsigned int height)
@@ -201,9 +199,7 @@ size_t TensorInfo::init_auto_padding(const TensorShape &tensor_shape, size_t num
_format = Format::UNKNOWN;
_tensor_shape = tensor_shape;
- Coordinates coordinates;
- coordinates.set_num_dimensions(_tensor_shape.num_dimensions());
- _valid_region = ValidRegion{ coordinates, _tensor_shape };
+ _valid_region = ValidRegion{ Coordinates(), _tensor_shape };
auto_padding();
@@ -368,9 +364,7 @@ ITensorInfo &TensorInfo::set_tensor_shape(TensorShape shape)
_total_size = _tensor_shape[idx_last_dimension] * _strides_in_bytes[idx_last_dimension];
}
- Coordinates coordinates;
- coordinates.set_num_dimensions(_tensor_shape.num_dimensions());
- _valid_region = ValidRegion{ coordinates, _tensor_shape };
+ _valid_region = ValidRegion{ Coordinates(), _tensor_shape };
return *this;
}
diff --git a/tests/Utils.h b/tests/Utils.h
index 5814965a40..750d907778 100644
--- a/tests/Utils.h
+++ b/tests/Utils.h
@@ -202,17 +202,19 @@ inline I foldl(F &&func, I &&initial, T &&value, Vs &&... values)
/** Create a valid region based on tensor shape, border mode and border size
*
- * @param[in] shape Shape used as size of the valid region.
+ * @param[in] a_shape Shape used as size of the valid region.
* @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
* @param[in] border_size (Optional) Border size used to specify the region to exclude.
*
* @return A valid region starting at (0, 0, ...) with size of @p shape if @p border_undefined is false; otherwise
* return A valid region starting at (@p border_size.left, @p border_size.top, ...) with reduced size of @p shape.
*/
-inline ValidRegion shape_to_valid_region(TensorShape shape, bool border_undefined = false, BorderSize border_size = BorderSize(0))
+inline ValidRegion shape_to_valid_region(const TensorShape &a_shape, bool border_undefined = false, BorderSize border_size = BorderSize(0))
{
- Coordinates anchor;
- anchor.set_num_dimensions(shape.num_dimensions());
+ ValidRegion valid_region{ Coordinates(), a_shape };
+
+ Coordinates &anchor = valid_region.anchor;
+ TensorShape &shape = valid_region.shape;
if(border_undefined)
{
@@ -228,43 +230,45 @@ inline ValidRegion shape_to_valid_region(TensorShape shape, bool border_undefine
shape.set(1, valid_shape_y);
}
- return ValidRegion(std::move(anchor), std::move(shape));
+ return valid_region;
}
/** Create a valid region for Gaussian Pyramid Half based on tensor shape and valid region at level "i - 1" and border mode
*
* @note The border size is 2 in case of Gaussian Pyramid Half
*
- * @param[in] shape Shape used at level "i - 1" of Gaussian Pyramid Half
- * @param[in] valid_region Valid region used at level "i - 1" of Gaussian Pyramid Half
+ * @param[in] a_shape Shape used at level "i - 1" of Gaussian Pyramid Half
+ * @param[in] a_valid_region Valid region used at level "i - 1" of Gaussian Pyramid Half
* @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
*
* return The valid region for the level "i" of Gaussian Pyramid Half
*/
-inline ValidRegion shape_to_valid_region_gaussian_pyramid_half(TensorShape shape, ValidRegion valid_region, bool border_undefined = false)
+inline ValidRegion shape_to_valid_region_gaussian_pyramid_half(const TensorShape &a_shape, const ValidRegion &a_valid_region, bool border_undefined = false)
{
constexpr int border_size = 2;
- Coordinates anchor;
- anchor.set_num_dimensions(shape.num_dimensions());
+
+ ValidRegion valid_region{ Coordinates(), a_shape };
+
+ Coordinates &anchor = valid_region.anchor;
+ TensorShape &shape = valid_region.shape;
// Compute tensor shape for level "i" of Gaussian Pyramid Half
// dst_width = (src_width + 1) * 0.5f
// dst_height = (src_height + 1) * 0.5f
- TensorShape dst_shape = shape;
- dst_shape.set(0, (shape[0] + 1) * 0.5f);
- dst_shape.set(1, (shape[1] + 1) * 0.5f);
+ shape.set(0, (shape[0] + 1) * 0.5f);
+ shape.set(1, (shape[1] + 1) * 0.5f);
if(border_undefined)
{
ARM_COMPUTE_ERROR_ON(shape.num_dimensions() < 2);
// Compute the left and top invalid borders
- float invalid_border_left = static_cast<float>(valid_region.anchor.x() + border_size) / 2.0f;
- float invalid_border_top = static_cast<float>(valid_region.anchor.y() + border_size) / 2.0f;
+ float invalid_border_left = static_cast<float>(a_valid_region.anchor.x() + border_size) / 2.0f;
+ float invalid_border_top = static_cast<float>(a_valid_region.anchor.y() + border_size) / 2.0f;
// For the new anchor point we can have 2 cases:
- // 1) If the width/height of the tensor shape is odd, we have to take the ceil value of (valid_region.anchor.x() + border_size) / 2.0f or (valid_region.anchor.y() + border_size / 2.0f
- // 2) If the width/height of the tensor shape is even, we have to take the floor value of (valid_region.anchor.x() + border_size) / 2.0f or (valid_region.anchor.y() + border_size) / 2.0f
+ // 1) If the width/height of the tensor shape is odd, we have to take the ceil value of (a_valid_region.anchor.x() + border_size) / 2.0f or (a_valid_region.anchor.y() + border_size) / 2.0f
+ // 2) If the width/height of the tensor shape is even, we have to take the floor value of (a_valid_region.anchor.x() + border_size) / 2.0f or (a_valid_region.anchor.y() + border_size) / 2.0f
// In this manner we should be able to propagate correctly the valid region along all levels of the pyramid
invalid_border_left = (shape[0] % 2) ? std::ceil(invalid_border_left) : std::floor(invalid_border_left);
invalid_border_top = (shape[1] % 2) ? std::ceil(invalid_border_top) : std::floor(invalid_border_top);
@@ -275,21 +279,21 @@ inline ValidRegion shape_to_valid_region_gaussian_pyramid_half(TensorShape shape
// Compute shape
// Calculate the right and bottom invalid borders at the previous level of the pyramid
- const float prev_invalid_border_right = static_cast<float>(shape[0] - (valid_region.anchor.x() + valid_region.shape[0]));
- const float prev_invalid_border_bottom = static_cast<float>(shape[1] - (valid_region.anchor.y() + valid_region.shape[1]));
+ const float prev_invalid_border_right = static_cast<float>(shape[0] - (a_valid_region.anchor.x() + a_valid_region.shape[0]));
+ const float prev_invalid_border_bottom = static_cast<float>(shape[1] - (a_valid_region.anchor.y() + a_valid_region.shape[1]));
// Calculate the right and bottom invalid borders at the current level of the pyramid
const float invalid_border_right = std::ceil((prev_invalid_border_right + static_cast<float>(border_size)) / 2.0f);
const float invalid_border_bottom = std::ceil((prev_invalid_border_bottom + static_cast<float>(border_size)) / 2.0f);
- const int valid_shape_x = std::max(0, static_cast<int>(dst_shape.x()) - static_cast<int>(invalid_border_left) - static_cast<int>(invalid_border_right));
- const int valid_shape_y = std::max(0, static_cast<int>(dst_shape.y()) - static_cast<int>(invalid_border_top) - static_cast<int>(invalid_border_bottom));
+ const int valid_shape_x = std::max(0, static_cast<int>(shape.x()) - static_cast<int>(invalid_border_left) - static_cast<int>(invalid_border_right));
+ const int valid_shape_y = std::max(0, static_cast<int>(shape.y()) - static_cast<int>(invalid_border_top) - static_cast<int>(invalid_border_bottom));
- dst_shape.set(0, valid_shape_x);
- dst_shape.set(1, valid_shape_y);
+ shape.set(0, valid_shape_x);
+ shape.set(1, valid_shape_y);
}
- return ValidRegion(std::move(anchor), std::move(dst_shape));
+ return valid_region;
}
/** Write the value after casting the pointer according to @p data_type.