aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorOmar Al Khatib <omar.alkhatib@arm.com>2023-03-28 11:14:29 +0100
committerOmar Al Khatib <omar.alkhatib@arm.com>2023-04-03 08:08:47 +0000
commitfff9a4cb56d3d3dbfe85db555eea4bc9b3143996 (patch)
tree5b567a9ee004560fbb5eeda0bd803d1a6397fd4f
parent1fad9f27bfc9da711e216bd80eef60bb68d9cb86 (diff)
downloadComputeLibrary-fff9a4cb56d3d3dbfe85db555eea4bc9b3143996.tar.gz
Add Cropping to CLBatchToSpace
- Deprecate dynamic block shape interface
- Iterate over output window instead of input window for simpler implementation and better performance.
- Add cropping support and cropping tests

Resolves [COMPMID-5865]

Signed-off-by: Omar Al Khatib <omar.alkhatib@arm.com>
Change-Id: Ic67d44a6a39299ecdafc507f12e3dc5d517dfb62
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9385
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h18
-rw-r--r--docs/user_guide/release_version_and_change_log.dox1
-rw-r--r--src/core/CL/cl_kernels/nchw/batch_to_space.cl44
-rw-r--r--src/core/CL/cl_kernels/nhwc/batch_to_space.cl42
-rw-r--r--src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp66
-rw-r--r--src/core/CL/kernels/CLBatchToSpaceLayerKernel.h17
-rw-r--r--src/runtime/CL/functions/CLBatchToSpaceLayer.cpp17
-rw-r--r--tests/validation/CL/BatchToSpaceLayer.cpp88
8 files changed, 152 insertions, 141 deletions
diff --git a/arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h b/arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h
index 4b7cb60bc1..861330b9d4 100644
--- a/arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h
+++ b/arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h
@@ -67,18 +67,22 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*/
- void configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output, const CropInfo &crop_info = CropInfo{});
+ ARM_COMPUTE_DEPRECATED_REL(23.05)
+ void configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Set the input and output tensors.
*
* @param[in] compile_context The compile context to be used.
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output, const CropInfo &crop_info = CropInfo{});
+ ARM_COMPUTE_DEPRECATED_REL(23.05)
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Set the input and output tensors. (Static block shape).
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -103,11 +107,13 @@ public:
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape block shape tensor info with shape [M]. Data types supported: S32
* @param[out] output Tensor output info. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
*
* @return a status
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output, const CropInfo &crop_info = CropInfo{});
+ ARM_COMPUTE_DEPRECATED_REL(23.05)
+ static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayer (Static block shape).
*
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index aa9061f2b8..ddbeffd6a1 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -42,6 +42,7 @@ If there is more than one release in a month then an extra sequential number is
@section S2_2_changelog Changelog
v23.05 Public major release
- Deprecate dynamic block shape in @ref NEBatchToSpaceLayer
+ - Deprecate dynamic block shape in @ref CLBatchToSpaceLayer
v23.02.1 Public patch release
- Allow mismatching data layouts between the source tensor and weights for \link cpu::CpuGemmDirectConv2d CpuGemmDirectConv2d \endlink with fixed format kernels.
diff --git a/src/core/CL/cl_kernels/nchw/batch_to_space.cl b/src/core/CL/cl_kernels/nchw/batch_to_space.cl
index 89129cff3f..a813715b89 100644
--- a/src/core/CL/cl_kernels/nchw/batch_to_space.cl
+++ b/src/core/CL/cl_kernels/nchw/batch_to_space.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,6 +30,8 @@
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
* @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
*
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
+ *
* @param[in] input_ptr Pointer to the source tensor. Supported data types: All
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
@@ -55,28 +57,27 @@
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void batch_to_space_nchw(
- TENSOR3D_DECLARATION(input),
+ TENSOR4D_DECLARATION(input),
const int batch_id,
VECTOR_DECLARATION(block_shape),
- TENSOR4D_DECLARATION(output))
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
const int block_x = *((__global int *)vector_offset(&block, 0));
const int block_y = *((__global int *)vector_offset(&block, 1));
- const int r = (BATCH_SIZE / (block_x * block_y));
const int x = get_global_id(0);
const int y = get_global_id(1);
const int z = get_global_id(2);
- const int w = batch_id % r;
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * block_x) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, w)) = *((__global DATA_TYPE *)in.ptr);
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, in_batch));
}
#endif // defined(DATA_TYPE) && defined(BATCH_SIZE)
@@ -107,25 +108,24 @@ __kernel void batch_to_space_nchw(
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void batch_to_space_static_nchw(
- TENSOR3D_DECLARATION(input),
+ TENSOR4D_DECLARATION(input),
const int batch_id,
- TENSOR4D_DECLARATION(output))
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
const int block_x = BLOCK_SHAPE_X;
const int block_y = BLOCK_SHAPE_Y;
- const int r = (BATCH_SIZE / (block_x * block_y));
- const int x = get_global_id(0);
- const int y = get_global_id(1);
+ const int x = get_global_id(0) + CROP_LEFT;
+ const int y = get_global_id(1) + CROP_TOP;
const int z = get_global_id(2);
- const int w = batch_id % r;
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * block_x) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, w)) = *((__global DATA_TYPE *)in.ptr);
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, in_batch));
}
-#endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) \ No newline at end of file
+#endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
diff --git a/src/core/CL/cl_kernels/nhwc/batch_to_space.cl b/src/core/CL/cl_kernels/nhwc/batch_to_space.cl
index a5334525fe..16141bcd2e 100644
--- a/src/core/CL/cl_kernels/nhwc/batch_to_space.cl
+++ b/src/core/CL/cl_kernels/nhwc/batch_to_space.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,6 +30,8 @@
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
* @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
*
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
+ *
* @param[in] input_ptr Pointer to the source tensor. Supported data types: All
* @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
@@ -55,28 +57,27 @@
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void batch_to_space_nhwc(
- TENSOR3D_DECLARATION(input),
+ TENSOR4D_DECLARATION(input),
const int batch_id,
VECTOR_DECLARATION(block_shape),
- TENSOR4D_DECLARATION(output))
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
const int block_x = *((__global int *)vector_offset(&block, 0));
const int block_y = *((__global int *)vector_offset(&block, 1));
- const int r = (BATCH_SIZE / (block_x * block_y));
const int x = get_global_id(1);
const int y = get_global_id(2);
const int z = get_global_id(0);
- const int w = batch_id % r;
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * (block_x)) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, w)) = *((__global DATA_TYPE *)in.ptr);
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, in_batch));
}
#endif // defined(DATA_TYPE) && defined(BATCH_SIZE)
@@ -107,25 +108,24 @@ __kernel void batch_to_space_nhwc(
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void batch_to_space_static_nhwc(
- TENSOR3D_DECLARATION(input),
+ TENSOR4D_DECLARATION(input),
const int batch_id,
- TENSOR4D_DECLARATION(output))
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
const int block_x = BLOCK_SHAPE_X;
const int block_y = BLOCK_SHAPE_Y;
- const int r = (BATCH_SIZE / (block_x * block_y));
- const int x = get_global_id(1);
- const int y = get_global_id(2);
+ const int x = get_global_id(1) + CROP_LEFT;
+ const int y = get_global_id(2) + CROP_TOP;
const int z = get_global_id(0);
- const int w = batch_id % r;
- const int out_x = x * block_x + (batch_id / r) % block_x;
- const int out_y = y * block_y + (batch_id / r) / block_x;
+ const int in_batch = batch_id + ((x % block_x) + (y % block_y) * (block_x)) * BATCH_SIZE;
+ const int in_x = x / block_x;
+ const int in_y = y / block_y;
- *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, w)) = *((__global DATA_TYPE *)in.ptr);
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, in_batch));
}
#endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) \ No newline at end of file
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
index b47d5a7e38..c3dc91b8f6 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp
@@ -30,6 +30,7 @@
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"
+#include "arm_compute/core/TensorInfo.h"
using namespace arm_compute::misc::shape_calculator;
namespace arm_compute
@@ -52,7 +53,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_inf
return Status{};
}
-Status validate_arguments_static(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const ITensorInfo *output)
+Status validate_arguments_static(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const ITensorInfo *output, const CropInfo &crop_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
@@ -66,12 +67,9 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
// Validate output if initialized
if(output->total_size() != 0)
{
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != (block_shape_x * input->tensor_shape()[idx_width]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != (block_shape_y * input->tensor_shape()[idx_height]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] != input->tensor_shape()[idx_channel]);
+ const TensorShape expected_output_shape = compute_batch_to_space_shape(input->data_layout(), input->tensor_shape(), block_shape_x, block_shape_y, crop_info);
+ const TensorInfo expected_output = output->clone()->set_tensor_shape(expected_output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output);
ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -103,52 +101,50 @@ void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_contex
_block_shape = block_shape;
_output = output;
- const int idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
-
// Create kernel
CLBuildOptions build_opts;
- build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3)));
- build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
+ build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(3)));
_kernel = create_kernel(compile_context, "batch_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
+
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*output->info(), Steps());
ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
-void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output, const CropInfo &crop_info)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, output);
+ configure(CLKernelLibrary::get().get_compile_context(), input, block_shape_x, block_shape_y, output, crop_info);
}
-void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output)
+void CLBatchToSpaceLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output,
+ const CropInfo &crop_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
const TensorShape output_shape = compute_batch_to_space_shape(input->info()->data_layout(), input->info()->tensor_shape(), block_shape_x, block_shape_y);
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info(), crop_info));
_input = input;
_output = output;
- const int idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH);
-
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->info()->data_type())));
- build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3)));
+ build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(3)));
build_opts.add_option("-DBLOCK_SHAPE_X=" + support::cpp11::to_string(block_shape_x));
build_opts.add_option("-DBLOCK_SHAPE_Y=" + support::cpp11::to_string(block_shape_y));
- build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width)));
+ build_opts.add_option("-DCROP_LEFT=" + support::cpp11::to_string(crop_info.left));
+ build_opts.add_option("-DCROP_TOP=" + support::cpp11::to_string(crop_info.top));
_kernel = create_kernel(compile_context, "batch_to_space_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options());
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*output->info(), Steps());
ICLKernel::configure_internal(win);
}
@@ -159,10 +155,10 @@ Status CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const ITens
return Status{};
}
-Status CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output)
+Status CLBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output, const CropInfo &crop_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, output, crop_info));
return Status{};
}
@@ -171,32 +167,32 @@ void CLBatchToSpaceLayerKernel::run(const Window &window, cl::CommandQueue &queu
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
- Window slice_in = window.first_slice_window_3D();
- Window slice_out = window.first_slice_window_4D();
+ Window slice_out = window.first_slice_window_3D();
+ Window slice_in = window.first_slice_window_4D();
Window vector_slice = window.first_slice_window_1D();
vector_slice.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
- slice_out.set(3, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
+ slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ slice_in.set(3, Window::Dimension(0, 0, 0));
int batch_id = 0;
do
{
unsigned int idx = 0;
- add_3D_tensor_argument(idx, _input, slice_in);
+ add_4D_tensor_argument(idx, _input, slice_in);
add_argument(idx, batch_id);
if(_block_shape != nullptr)
{
add_1D_tensor_argument(idx, _block_shape, vector_slice);
}
- add_4D_tensor_argument(idx, _output, slice_out);
- enqueue(queue, *this, slice_in, lws_hint());
+ add_3D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice_out, lws_hint());
++batch_id;
}
- while(window.slide_window_slice_3D(slice_in));
+ while(window.slide_window_slice_3D(slice_out));
}
-} // namespace arm_compute
+} // namespace arm_compute \ No newline at end of file
diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h
index 131a43e59c..a05184cd5b 100644
--- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h
+++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,6 +52,8 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*/
void configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Initialise the kernel's inputs and output.
@@ -60,6 +62,8 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*/
void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output);
/** Initialise the kernel's inputs and output (Static block shape).
@@ -68,8 +72,9 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*/
- void configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
+ void configure(const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output, const CropInfo &crop_info);
/** Initialise the kernel's inputs and output (Static block shape).
*
* @param[in] compile_context The compile context to be used.
@@ -77,8 +82,9 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, const ICLTensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ICLTensor *output, const CropInfo &crop_info);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayerKernel
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -86,6 +92,8 @@ public:
* @param[in] output Tensor output. Data types supported: same as @p input
*
* @return a status
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in 23.08 release
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref CLBatchToSpaceLayerKernel (Static block shape).
@@ -94,10 +102,11 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[in] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output, const CropInfo &crop_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp b/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp
index d4342b456f..d7a409128d 100644
--- a/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchToSpaceLayer.cpp
@@ -43,14 +43,14 @@ CLBatchToSpaceLayer::CLBatchToSpaceLayer()
CLBatchToSpaceLayer::~CLBatchToSpaceLayer() = default;
-void CLBatchToSpaceLayer::configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output, const CropInfo &crop_info)
+void CLBatchToSpaceLayer::configure(const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, output, crop_info);
+ ARM_COMPUTE_LOG_PARAMS(input, block_shape, output);
+ _batch_to_space_kernel->configure(CLKernelLibrary::get().get_compile_context(), input, block_shape, output);
}
-void CLBatchToSpaceLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output, const CropInfo &crop_info)
+void CLBatchToSpaceLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *block_shape, ICLTensor *output)
{
- ARM_COMPUTE_UNUSED(crop_info);
ARM_COMPUTE_LOG_PARAMS(input, block_shape, output);
_batch_to_space_kernel->configure(compile_context, input, block_shape, output);
}
@@ -62,21 +62,18 @@ void CLBatchToSpaceLayer::configure(const ICLTensor *input, int32_t block_shape_
void CLBatchToSpaceLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, int32_t block_shape_x, int32_t block_shape_y, ICLTensor *output, const CropInfo &crop_info)
{
- ARM_COMPUTE_UNUSED(crop_info);
ARM_COMPUTE_LOG_PARAMS(input, block_shape_x, block_shape_y, output);
- _batch_to_space_kernel->configure(compile_context, input, block_shape_x, block_shape_y, output);
+ _batch_to_space_kernel->configure(compile_context, input, block_shape_x, block_shape_y, output, crop_info);
}
-Status CLBatchToSpaceLayer::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output, const CropInfo &crop_info)
+Status CLBatchToSpaceLayer::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output)
{
- ARM_COMPUTE_UNUSED(crop_info);
return CLBatchToSpaceLayerKernel::validate(input, block_shape, output);
}
Status CLBatchToSpaceLayer::validate(const ITensorInfo *input, int32_t block_shape_x, int32_t block_shape_y, const ITensorInfo *output, const CropInfo &crop_info)
{
- ARM_COMPUTE_UNUSED(crop_info);
- return CLBatchToSpaceLayerKernel::validate(input, block_shape_x, block_shape_y, output);
+ return CLBatchToSpaceLayerKernel::validate(input, block_shape_x, block_shape_y, output, crop_info);
}
void CLBatchToSpaceLayer::run()
diff --git a/tests/validation/CL/BatchToSpaceLayer.cpp b/tests/validation/CL/BatchToSpaceLayer.cpp
index e90ac921c5..ca12b76e8a 100644
--- a/tests/validation/CL/BatchToSpaceLayer.cpp
+++ b/tests/validation/CL/BatchToSpaceLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,56 +50,38 @@ using CLBatchToSpaceLayerFixture = BatchToSpaceLayerValidationFixture<CLTensor,
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blockx > blocky
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blocky > blockx
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Mismatching data types
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Wrong data type block shape
- TensorInfo(TensorShape(32U, 13U, 2U, 2U, 4U), 1, DataType::F32), // Wrong tensor shape
- }),
- framework::dataset::make("BlockShapeInfo",{ TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(2U, 4U), 1, DataType::S32),
- TensorInfo(TensorShape(4U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(2U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
- })),
- framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 16U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 32U, 2U, 1U), 1, DataType::F32),
-
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- })),
- framework::dataset::make("Expected", { true, true,true, false, false, false})),
- input_info, block_shape_info, output_info, expected)
-{
- bool has_error = bool(CLBatchToSpaceLayer::validate(&input_info.clone()->set_is_resizable(false), &block_shape_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false)));
- ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
-}
-DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(
+DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blockx > blocky
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blocky > blockx
- TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Mismatching data types
- TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Negative block shapes
- TensorInfo(TensorShape(32U, 16U, 2U, 4U, 4U), 1, DataType::F32), // Wrong tensor shape
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Supported: blockx != blocky && blockx > blocky
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Supported: blockx != blocky && blocky > blockx
+ TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Invalid: Mismatching data types
+ TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Invalid: Negative block shapes
+ TensorInfo(TensorShape(32U, 16U, 2U, 4U, 4U), 1, DataType::F32),// Unsupported tensor rank
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Invalid output tensor shape (invalid batch dimension)
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Invalid output tensor shape (invalid spatial dimension)
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Supported: correct tensor shape with cropping
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Invalid tensor shape with cropping
}),
- framework::dataset::make("BlockShapeX", { 2, 4, 2, 2, 2, 2 })),
- framework::dataset::make("BlockShapeY", { 2, 2, 4, 2, -2, 2 })),
+ framework::dataset::make("BlockShapeX", { 2, 4, 2, 2, 2, 2, 2, 2, 2, 2 })),
+ framework::dataset::make("BlockShapeY", { 2, 2, 4, 2, -2, 2, 2, 2, 2, 2 })),
+ framework::dataset::make("CropInfo", {
+ CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{3, 2, 1, 3}, CropInfo{3, 2, 1, 3}
+ })),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 16U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 32U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(64U, 16U, 2U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 32U, 2U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F16),
TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 8U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(33U, 32U, 2U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(27, 12U, 2U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 2U, 4U), 1, DataType::F32),
})),
- framework::dataset::make("Expected", { true, true,true, false, false, false})),
- input_info, block_shape_x, block_shape_y, output_info, expected)
+ framework::dataset::make("Expected", { true, true, true, false, false, false, false, false, true, false})),
+ input_info, block_shape_x, block_shape_y, crop_info, output_info, expected)
{
- bool has_error = bool(CLBatchToSpaceLayer::validate(&input_info.clone()->set_is_resizable(false), block_shape_x, block_shape_y, &output_info.clone()->set_is_resizable(false)));
+ bool has_error = bool(CLBatchToSpaceLayer::validate(&input_info.clone()->set_is_resizable(false), block_shape_x, block_shape_y, &output_info.clone()->set_is_resizable(false), crop_info));
ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
// clang-format on
@@ -114,6 +96,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchToSpaceLayerFixture<float>, framework::D
// Validate output
validate(CLAccessor(_target), _reference);
}
+
+FIXTURE_DATA_TEST_CASE(RunSmallWithCropping, CLBatchToSpaceLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(datasets::SmallBatchToSpaceLayerWithCroppingDataset(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType",
DataType::F32)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
@@ -131,6 +123,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchToSpaceLayerFixture<half>, framework::Da
// Validate output
validate(CLAccessor(_target), _reference);
}
+
+FIXTURE_DATA_TEST_CASE(RunSmallWithCropping, CLBatchToSpaceLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(datasets::SmallBatchToSpaceLayerWithCroppingDataset(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType",
DataType::F16)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))