 arm_compute/core/utils/misc/ShapeCalculator.h            |  2
 arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h | 15
 docs/user_guide/release_version_and_change_log.dox       |  3
 src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp      | 92
 src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h        | 19
 src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp       | 12
 tests/validation/NEON/BatchToSpaceLayer.cpp              | 86
 7 files changed, 120 insertions(+), 109 deletions(-)
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 916da1bd9d..9b1ebf63c2 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -1201,7 +1201,7 @@ inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int ax
*
* @return the calculated shape
*/
-inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const int block_x, const int block_y, const Size2D &padding_left, const Size2D &padding_right)
+inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, int block_x, int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
TensorShape output_shape{ input->tensor_shape() };
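
For context, the crop-aware validation introduced further down relies on compute_batch_to_space_shape. A minimal sketch of the shape rule it encodes, inferred from the validation data in the tests below (the Shape4D and Crop helpers here are hypothetical illustration types, not library code): spatial dimensions grow by the block shape, the batch dimension shrinks by block_x * block_y, and the crop amounts are subtracted last.

    // Hypothetical standalone model of the assumed batch-to-space shape rule.
    struct Shape4D { int w, h, c, n; };
    struct Crop    { int left = 0, right = 0, top = 0, bottom = 0; };

    Shape4D batch_to_space_shape(const Shape4D &in, int bx, int by, const Crop &crop = {})
    {
        // The batch must split evenly into bx * by interleaved sub-batches.
        return { in.w * bx - crop.left - crop.right,
                 in.h * by - crop.top  - crop.bottom,
                 in.c,
                 in.n / (bx * by) };
    }
    // E.g. {16, 8, 2, 16} with a 2x2 block and crop {3, 2, 1, 3} gives {27, 12, 2, 4},
    // matching the "correct tensor shape with cropping" validation case below.
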
diff --git a/arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h b/arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h
index 92df8913ab..b33ba435a8 100644
--- a/arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h
@@ -64,16 +64,18 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
- void configure(const ITensor *input, const ITensor *block_shape, ITensor *output, const CropInfo &crop_info = CropInfo{});
+ ARM_COMPUTE_DEPRECATED_REL(23.05)
+ void configure(const ITensor *input, const ITensor *block_shape, ITensor *output);
/** Set the input and output tensors. (Static block shape).
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*/
void configure(const ITensor *input, int32_t block_shape_x, int32_t block_shape_y, ITensor *output, const CropInfo &crop_info = CropInfo{});
/** Static function to check if given info will lead to a valid configuration of @ref NEBatchToSpaceLayer
@@ -81,18 +83,19 @@ public:
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape block shape tensor info with shape [M]. Data types supported: S32
* @param[out] output Tensor output info. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
*
* @return a status
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
- static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output, const CropInfo &crop_info = CropInfo{});
+ ARM_COMPUTE_DEPRECATED_REL(23.05)
+ static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEBatchToSpaceLayer (Static block shape).
*
* @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output info. Data types supported: same as @p input
- * @param[in] crop_info Information about how the output shape is cropped after batch to space is performed
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*
* @return a status
*/
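
A hedged usage sketch of the remaining static-block-shape overload with cropping; the shapes mirror the "correct tensor shape with cropping" validation case in the tests below, and the setup is abbreviated, so treat it as illustrative rather than canonical:

    #include "arm_compute/runtime/NEON/functions/NEBatchToSpaceLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void batch_to_space_example()
    {
        Tensor input, output;
        input.allocator()->init(TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32));
        // Leaving output's info empty lets configure() auto-initialize its shape.

        NEBatchToSpaceLayer b2s;
        const CropInfo crop_info{3, 2, 1, 3}; // left, right, top, bottom
        b2s.configure(&input, /*block_shape_x=*/2, /*block_shape_y=*/2, &output, crop_info);

        input.allocator()->allocate();
        output.allocator()->allocate();
        b2s.run(); // output shape: 27 x 12 x 2 x 4
    }
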
diff --git a/docs/user_guide/release_version_and_change_log.dox b/docs/user_guide/release_version_and_change_log.dox
index 1bfb468ef1..aa9061f2b8 100644
--- a/docs/user_guide/release_version_and_change_log.dox
+++ b/docs/user_guide/release_version_and_change_log.dox
@@ -40,6 +40,9 @@ If there is more than one release in a month then an extra sequential number is
@note Starting from release 22.05, the 'master' branch is no longer used; it has been replaced by 'main'. Please update your clone jobs accordingly.
@section S2_2_changelog Changelog
+v23.05 Public major release
+ - Deprecate dynamic block shape in @ref NEBatchToSpaceLayer
+
v23.02.1 Public patch release
- Allow mismatching data layouts between the source tensor and weights for \link cpu::CpuGemmDirectConv2d CpuGemmDirectConv2d \endlink with fixed format kernels.
- Fixes for experimental CPU only Bazel and CMake builds.
diff --git a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
index 84c727df73..83fb5f6f51 100644
--- a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
@@ -53,7 +54,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *block_inf
return Status{};
}
-Status validate_arguments_static(const ITensorInfo *input, const int block_shape_x, const int block_shape_y, const ITensorInfo *output)
+Status validate_arguments_static(const ITensorInfo *input, int block_shape_x, int block_shape_y, const ITensorInfo *output, const CropInfo &crop_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
@@ -66,14 +67,12 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
// Validate output if initialized
if(output->total_size() != 0)
{
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] != (block_shape_x * input->tensor_shape()[idx_width]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_height] != (block_shape_y * input->tensor_shape()[idx_height]));
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] != input->tensor_shape()[idx_channel]);
ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+
+ const TensorShape expected_output_shape = compute_batch_to_space_shape(input->data_layout(), input->tensor_shape(), block_shape_x, block_shape_y, crop_info);
+ const TensorInfo expected_output = output->clone()->set_tensor_shape(expected_output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &expected_output);
}
return Status{};
@@ -81,7 +80,7 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
} // namespace
NEBatchToSpaceLayerKernel::NEBatchToSpaceLayerKernel()
- : _input(nullptr), _block_shape(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _block_shape_x(), _block_shape_y()
+ : _input(nullptr), _block_shape(nullptr), _output(nullptr), _data_layout(DataLayout::UNKNOWN), _block_shape_x(), _block_shape_y(), _crop_info()
{
}
@@ -96,28 +95,29 @@ void NEBatchToSpaceLayerKernel::configure(const ITensor *input, const ITensor *b
_data_layout = input->info()->data_layout();
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*output->info(), Steps());
ICPPKernel::configure(win);
}
-void NEBatchToSpaceLayerKernel::configure(const ITensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ITensor *output)
+void NEBatchToSpaceLayerKernel::configure(const ITensor *input, int32_t block_shape_x, int32_t block_shape_y, ITensor *output, const CropInfo &crop_info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
const TensorShape output_shape = compute_batch_to_space_shape(input->info()->data_layout(), input->info()->tensor_shape(), block_shape_x, block_shape_y);
- // Output auto inizialitation if not yet initialized
+ // Output auto initialization if not yet initialized
auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
// Perform validation step
- ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info()));
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_static(input->info(), block_shape_x, block_shape_y, output->info(), crop_info));
_input = input;
_output = output;
_block_shape_x = block_shape_x;
_block_shape_y = block_shape_y;
_data_layout = input->info()->data_layout();
+ _crop_info = crop_info;
// Configure kernel window
- Window win = calculate_max_window(*input->info(), Steps());
+ Window win = calculate_max_window(*output->info(), Steps());
ICPPKernel::configure(win);
}
@@ -128,10 +128,10 @@ Status NEBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const ITens
return Status{};
}
-Status NEBatchToSpaceLayerKernel::validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output)
+Status NEBatchToSpaceLayerKernel::validate(const ITensorInfo *input, int32_t block_shape_x, int32_t block_shape_y, const ITensorInfo *output, const CropInfo &crop_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_static(input, block_shape_x, block_shape_y, output, crop_info));
return Status{};
}
@@ -148,18 +148,10 @@ void NEBatchToSpaceLayerKernel::run(const Window &window, const ThreadInfo &info
_block_shape_y = *(reinterpret_cast<const int *>(_block_shape->ptr_to_element(1)));
}
- const int batch_size = _input->info()->dimension(3);
- const int r = (batch_size / (_block_shape_x * _block_shape_y));
- const int element_size = _input->info()->element_size();
-
- Window slice_in = window.first_slice_window_3D();
- Window slice_out = window.first_slice_window_4D();
+ const int batch_size = _output->info()->dimension(3);
+ const int element_size = _output->info()->element_size();
- // The slice_out slice does not move
- slice_out.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
- slice_out.set(3, Window::Dimension(0, 0, 0));
+ Window slice_out = window.first_slice_window_3D();
int batch_id = 0;
// Main loop for NCHW and NHWC
@@ -167,47 +159,55 @@ void NEBatchToSpaceLayerKernel::run(const Window &window, const ThreadInfo &info
{
do
{
- Iterator in(_input, slice_in);
- execute_window_loop(slice_in, [&](const Coordinates & id)
+ Iterator out(_output, slice_out);
+ execute_window_loop(slice_out, [&](const Coordinates & id)
{
const int x = id.x();
const int y = id.y();
const int z = id.z();
-
- const int w = batch_id % r;
- const int out_x = x * _block_shape_x + (batch_id / r) % _block_shape_x;
- const int out_y = y * _block_shape_y + (batch_id / r) / _block_shape_x;
- Coordinates output_coords{ out_x, out_y, z, w };
- memcpy(_output->ptr_to_element(output_coords), in.ptr(), element_size);
+ // Translate x, y to uncropped version
+ const int x_c = x + _crop_info.left;
+ const int y_c = y + _crop_info.top;
+
+ const int in_batch = batch_id + ((x_c % _block_shape_x) + (y_c % _block_shape_y) * _block_shape_x) * batch_size;
+ const int in_x = x_c / _block_shape_x;
+ const int in_y = y_c / _block_shape_y;
+ Coordinates input_coords{ in_x, in_y, z, in_batch };
+ memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size);
},
- in);
+ out);
++batch_id;
}
- while(window.slide_window_slice_3D(slice_in));
+ while(window.slide_window_slice_3D(slice_out));
}
else
{
+ // For NHWC we can perform a block copy on the Channel (first) dimension. Thus we do not need to iterate over this dimension
+ slice_out.set(0U, Window::Dimension(0U, 1U, 1U));
do
{
- Iterator in(_input, slice_in);
- execute_window_loop(slice_in, [&](const Coordinates & id)
+ Iterator out(_output, slice_out);
+ execute_window_loop(slice_out, [&](const Coordinates & id)
{
- const int z = id.x();
const int x = id.y();
const int y = id.z();
- const int w = batch_id % r;
- const int out_x = x * _block_shape_x + (batch_id / r) % _block_shape_x;
- const int out_y = y * _block_shape_y + (batch_id / r) / _block_shape_x;
- Coordinates output_coords{ z, out_x, out_y, w };
- memcpy(_output->ptr_to_element(output_coords), in.ptr(), element_size);
+ // Translate x, y to uncropped version
+ const int x_c = x + _crop_info.left;
+ const int y_c = y + _crop_info.top;
+
+ const int in_batch = batch_id + ((x_c % _block_shape_x) + (y_c % _block_shape_y) * _block_shape_x) * batch_size;
+ const int in_x = x_c / _block_shape_x;
+ const int in_y = y_c / _block_shape_y;
+ Coordinates input_coords{ 0, in_x, in_y, in_batch };
+ memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size * _input->info()->dimension(0));
},
- in);
+ out);
++batch_id;
}
- while(window.slide_window_slice_3D(slice_in));
+ while(window.slide_window_slice_3D(slice_out));
}
}
} // namespace arm_compute
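
The rewritten run() inverts the old scatter into a gather: the kernel now walks the (already cropped) output window and computes which input element each output element reads, so cropped regions are simply never touched and no separate crop pass is needed. A scalar model of the NCHW mapping above (hypothetical helper using the same arithmetic as the kernel; note that batch_size in the kernel is the output batch dimension):

    struct InCoord { int x, y, z, batch; };

    InCoord source_of(int x, int y, int z, int batch_id,
                      int bx, int by, int out_batch_size,
                      int crop_left, int crop_top)
    {
        // Translate the cropped output coordinate back onto the uncropped grid.
        const int x_c = x + crop_left;
        const int y_c = y + crop_top;
        // Pick which of the bx * by interleaved sub-batches this element came from.
        const int in_batch = batch_id + ((x_c % bx) + (y_c % by) * bx) * out_batch_size;
        return { x_c / bx, y_c / by, z, in_batch };
    }

The NHWC branch applies the same mapping but copies a whole channel row per element, which is why its window skips dimension 0.
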
diff --git a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h
index 26e8224922..5eceee0904 100644
--- a/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h
+++ b/src/core/NEON/kernels/NEBatchToSpaceLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,6 +24,7 @@
#ifndef ARM_COMPUTE_NEBATCHTOSPACELAYERKERNEL_H
#define ARM_COMPUTE_NEBATCHTOSPACELAYERKERNEL_H
+#include "arm_compute/core/Types.h"
#include "src/core/NEON/INEKernel.h"
namespace arm_compute
@@ -55,6 +56,8 @@ public:
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
* @param[in] block_shape 1-D tensor with shape [M]. Data types supported: S32
* @param[out] output Tensor output. Data types supported: same as @p input
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
void configure(const ITensor *input, const ITensor *block_shape, ITensor *output);
/** Initialise the kernel's inputs and output (Static block shape).
@@ -63,8 +66,9 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*/
- void configure(const ITensor *input, const int32_t block_shape_x, const int32_t block_shape_y, ITensor *output);
+ void configure(const ITensor *input, int32_t block_shape_x, int32_t block_shape_y, ITensor *output, const CropInfo &crop_info = CropInfo{});
/** Static function to check if given info will lead to a valid configuration of @ref NEBatchToSpaceLayerKernel
*
* @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: All.
@@ -72,6 +76,8 @@ public:
* @param[in] output Tensor output. Data types supported: same as @p input
*
* @return a status
+ *
+ * @deprecated This method for dynamic block shape is not fully mature and will be removed in the 23.08 release
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEBatchToSpaceLayerKernel (Static block shape).
@@ -80,10 +86,11 @@ public:
* @param[in] block_shape_x Block shape x value.
* @param[in] block_shape_y Block shape y value.
* @param[in] output Tensor output. Data types supported: same as @p input
+ * @param[in] crop_info Specifies how the output shape is cropped after batch to space is performed
*
* @return a status
*/
- static Status validate(const ITensorInfo *input, const int32_t block_shape_x, const int32_t block_shape_y, const ITensorInfo *output);
+ static Status validate(const ITensorInfo *input, int32_t block_shape_x, int32_t block_shape_y, const ITensorInfo *output, const CropInfo &crop_info = CropInfo{});
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
@@ -93,9 +100,9 @@ private:
const ITensor *_block_shape; /**< Block shape tensor */
ITensor *_output; /**< Destination tensor */
DataLayout _data_layout; /**< Data layout to be used at run-time */
-
- int32_t _block_shape_x;
- int32_t _block_shape_y;
+ int32_t _block_shape_x;
+ int32_t _block_shape_y;
+ CropInfo _crop_info; /**< Information related to cropping performed on output after the operation */
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEBATCHTOSPACELAYERKERNEL_H */
diff --git a/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp b/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp
index b62fdad7a1..e258028d05 100644
--- a/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp
+++ b/src/runtime/NEON/functions/NEBatchToSpaceLayer.cpp
@@ -33,9 +33,8 @@
namespace arm_compute
{
-void NEBatchToSpaceLayer::configure(const ITensor *input, const ITensor *block_shape, ITensor *output, const CropInfo &crop_info)
+void NEBatchToSpaceLayer::configure(const ITensor *input, const ITensor *block_shape, ITensor *output)
{
- ARM_COMPUTE_UNUSED(crop_info);
ARM_COMPUTE_LOG_PARAMS(input, block_shape, output);
auto k = std::make_unique<NEBatchToSpaceLayerKernel>();
k->configure(input, block_shape, output);
@@ -44,21 +43,18 @@ void NEBatchToSpaceLayer::configure(const ITensor *input, const ITensor *block_s
void NEBatchToSpaceLayer::configure(const ITensor *input, int32_t block_shape_x, int32_t block_shape_y, ITensor *output, const CropInfo &crop_info)
{
- ARM_COMPUTE_UNUSED(crop_info);
auto k = std::make_unique<NEBatchToSpaceLayerKernel>();
- k->configure(input, block_shape_x, block_shape_y, output);
+ k->configure(input, block_shape_x, block_shape_y, output, crop_info);
_kernel = std::move(k);
}
-Status NEBatchToSpaceLayer::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output, const CropInfo &crop_info)
+Status NEBatchToSpaceLayer::validate(const ITensorInfo *input, const ITensorInfo *block_shape, const ITensorInfo *output)
{
- ARM_COMPUTE_UNUSED(crop_info);
return NEBatchToSpaceLayerKernel::validate(input, block_shape, output);
}
Status NEBatchToSpaceLayer::validate(const ITensorInfo *input, int32_t block_shape_x, int32_t block_shape_y, const ITensorInfo *output, const CropInfo &crop_info)
{
- ARM_COMPUTE_UNUSED(crop_info);
- return NEBatchToSpaceLayerKernel::validate(input, block_shape_x, block_shape_y, output);
+ return NEBatchToSpaceLayerKernel::validate(input, block_shape_x, block_shape_y, output, crop_info);
}
} // namespace arm_compute
diff --git a/tests/validation/NEON/BatchToSpaceLayer.cpp b/tests/validation/NEON/BatchToSpaceLayer.cpp
index a305dcbcc4..8cf11b7b95 100644
--- a/tests/validation/NEON/BatchToSpaceLayer.cpp
+++ b/tests/validation/NEON/BatchToSpaceLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021 Arm Limited.
+ * Copyright (c) 2019-2021, 2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,55 +49,38 @@ using NEBatchToSpaceLayerFixture = BatchToSpaceLayerValidationFixture<Tensor, Ac
// *INDENT-OFF*
// clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blockx > blocky
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blocky > blockx
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Mismatching data types
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32), // Wrong data type block shape
- TensorInfo(TensorShape(32U, 13U, 2U, 2U, 4U), 1, DataType::F32), // Wrong tensor shape
- }),
- framework::dataset::make("BlockShapeInfo",{ TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(2U, 4U), 1, DataType::S32),
- TensorInfo(TensorShape(4U, 2U), 1, DataType::S32),
- TensorInfo(TensorShape(2U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(2U, 2U), 1, DataType::S32),
- })),
- framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 16U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 32U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F16),
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 13U, 2U, 2U), 1, DataType::F32),
- })),
- framework::dataset::make("Expected", { true, true, true, false, false, false})),
- input_info, block_shape_info, output_info, expected)
-{
- bool has_error = bool(NEBatchToSpaceLayer::validate(&input_info.clone()->set_is_resizable(false), &block_shape_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false)));
- ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
-}
-DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(
+DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
framework::dataset::make("InputInfo", { TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32),
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blockx > blocky
- TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // blockx != blocky && blocky > blockx
- TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Mismatching data types
- TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Negative block shapes
- TensorInfo(TensorShape(32U, 16U, 2U, 4U, 4U), 1, DataType::F32), // Wrong tensor shape
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Supported: blockx != blocky && blockx > blocky
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Supported: blockx != blocky && blocky > blockx
+ TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Invalid: Mismatching data types
+ TensorInfo(TensorShape(16U, 8U, 2U, 4U), 1, DataType::F32), // Invalid: Negative block shapes
+ TensorInfo(TensorShape(32U, 16U, 2U, 4U, 4U), 1, DataType::F32), // Invalid: Unsupported tensor rank
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Invalid output tensor shape (invalid batch dimension)
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Invalid output tensor shape (invalid spatial dimension)
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Supported: correct tensor shape with cropping
+ TensorInfo(TensorShape(16U, 8U, 2U, 16U), 1, DataType::F32), // Invalid tensor shape with cropping
}),
- framework::dataset::make("BlockShapeX", { 2, 4, 2, 2, 2, 2 })),
- framework::dataset::make("BlockShapeY", { 2, 2, 4, 2, -2, 2 })),
+ framework::dataset::make("BlockShapeX", { 2, 4, 2, 2, 2, 2, 2, 2, 2, 2 })),
+ framework::dataset::make("BlockShapeY", { 2, 2, 4, 2, -2, 2, 2, 2, 2, 2 })),
+ framework::dataset::make("CropInfo", {
+ CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{}, CropInfo{3, 2, 1, 3}, CropInfo{3, 2, 1, 3}
+ })),
framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(64U, 16U, 2U, 1U), 1, DataType::F32),
- TensorInfo(TensorShape(32U, 32U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(64U, 16U, 2U, 2U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 32U, 2U, 2U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F16),
TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 8U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(33U, 32U, 2U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(27U, 12U, 2U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 2U, 4U), 1, DataType::F32),
})),
- framework::dataset::make("Expected", { true, true, true, false, false, false})),
- input_info, block_shape_x, block_shape_y, output_info, expected)
+ framework::dataset::make("Expected", { true, true, true, false, false, false, false, false, true, false})),
+ input_info, block_shape_x, block_shape_y, crop_info, output_info, expected)
{
- bool has_error = bool(NEBatchToSpaceLayer::validate(&input_info.clone()->set_is_resizable(false), block_shape_x, block_shape_y, &output_info.clone()->set_is_resizable(false)));
+ bool has_error = bool(NEBatchToSpaceLayer::validate(&input_info.clone()->set_is_resizable(false), block_shape_x, block_shape_y, &output_info.clone()->set_is_resizable(false), crop_info));
ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
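
Spelling out the arithmetic behind the Expected values for the four new rows (input {16, 8, 2, 16}, block 2x2):

    // Uncropped output would be {16*2, 8*2, 2, 16/(2*2)} = {32, 16, 2, 4}.
    // "invalid batch dimension":   {32, 16, 2, 1} fails, batch 1 != 4.
    // "invalid spatial dimension": {33, 32, 2, 4} fails, width 33 != 32.
    // With CropInfo{3, 2, 1, 3}:   {32-3-2, 16-1-3, 2, 4} = {27, 12, 2, 4} passes,
    // while the uncropped {32, 16, 2, 4} now fails.
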
// clang-format on
@@ -112,6 +95,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchToSpaceLayerFixture<float>, framework::D
// Validate output
validate(Accessor(_target), _reference);
}
+
+FIXTURE_DATA_TEST_CASE(RunSmallWithCropping, NEBatchToSpaceLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(datasets::SmallBatchToSpaceLayerWithCroppingDataset(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchToSpaceLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType",
DataType::F32)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
@@ -129,6 +122,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchToSpaceLayerFixture<half>, framework::Da
// Validate output
validate(Accessor(_target), _reference);
}
+FIXTURE_DATA_TEST_CASE(RunSmallWithCropping, NEBatchToSpaceLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(datasets::SmallBatchToSpaceLayerWithCroppingDataset(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchToSpaceLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType",
DataType::F16)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))