author    Michalis Spyrou <michalis.spyrou@arm.com>  2018-09-18 13:09:30 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:54:54 +0000
commit    13a51e11680aa24a9b841a4afe4079419bc8b80c (patch)
tree      ae9125b2e791d41fa4e2f8d9527ad3b9259eede0
parent    a799ce0ad775829862891dd98d1232638ec8761e (diff)
download  ComputeLibrary-13a51e11680aa24a9b841a4afe4079419bc8b80c.tar.gz
COMPMID-1554 Implementing Space to Batch on OpenCL - NHWC
Change-Id: Ifa37a6758f79d0a6ca771dcfb4c55a5d96b452d0
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/148892
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: bsgcomp <bsgcomp@arm.com>
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h      |  16
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                    |   6
-rw-r--r--  src/core/CL/cl_kernels/space_to_batch.cl           | 125
-rw-r--r--  src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp  |  41
-rw-r--r--  tests/validation/CL/SpaceToBatchLayer.cpp          |  20
-rw-r--r--  tests/validation/fixtures/SpaceToBatchFixture.h    |  23
6 files changed, 193 insertions(+), 38 deletions(-)
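
The core of this change is replacing hard-coded dimension indices (0 = width, 1 = height, 3 = batch) with layout-aware lookups, so the same code paths serve both NCHW and NHWC. Below is a minimal standalone sketch of the mapping that get_data_layout_dimension_index() has to perform, inferred from the conventions visible in this diff (the test fixture permutes an NCHW shape (W, H, C) to (C, W, H) for NHWC); it is an illustration, not the library's implementation:

// Sketch (not library code): map a logical dimension to its position in the
// tensor shape. Shapes are stored innermost-first, so NCHW = (W, H, C, N)
// and NHWC = (C, W, H, N) in dimension order 0..3.
enum class DataLayout { NCHW, NHWC };
enum class DataLayoutDimension { WIDTH, HEIGHT, CHANNEL, BATCHES };

int get_index(DataLayout layout, DataLayoutDimension dim)
{
    switch(dim)
    {
        case DataLayoutDimension::WIDTH:   return layout == DataLayout::NCHW ? 0 : 1;
        case DataLayoutDimension::HEIGHT:  return layout == DataLayout::NCHW ? 1 : 2;
        case DataLayoutDimension::CHANNEL: return layout == DataLayout::NCHW ? 2 : 0;
        case DataLayoutDimension::BATCHES: return 3; // batch is outermost in both layouts
    }
    return -1;
}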
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 2db7b28161..e88fd8d75e 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -528,12 +528,12 @@ inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const
const DataLayout data_layout = input->data_layout();
const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
TensorShape output_shape{ input->tensor_shape() };
output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x);
output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y);
- output_shape.set(3, input->tensor_shape()[idx_channel] / (block_x * block_y));
+ output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));
return output_shape;
}
@@ -566,9 +566,15 @@ inline TensorShape compute_split_shape(const ITensorInfo *input, unsigned int ax
inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const int block_x, const int block_y, const Size2D &padding_left, const Size2D &padding_right)
{
TensorShape output_shape{ input->tensor_shape() };
- output_shape.set(0, input->tensor_shape()[0] * block_x + padding_left.x() + padding_right.x());
- output_shape.set(1, input->tensor_shape()[1] * block_y + padding_left.y() + padding_right.y());
- output_shape.set(3, input->tensor_shape()[3] / (block_x * block_y));
+
+ const DataLayout data_layout = input->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
+ output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x + padding_left.x() + padding_right.x());
+ output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y + padding_left.y() + padding_right.y());
+ output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y));
return output_shape;
}
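
A worked example of compute_space_to_batch_shape() exactly as written above, for the NHWC dimension order (C, W, H, N); the shape and block values are hypothetical:

#include <cstdio>

int main()
{
    const int block_x = 2, block_y = 2;
    const int pad_left_x = 1, pad_right_x = 1, pad_left_y = 1, pad_right_y = 1;
    int shape[4] = { 16, 4, 4, 8 }; // NHWC: idx_channel=0, idx_width=1, idx_height=2, idx_batch=3
    shape[1] = shape[1] * block_x + pad_left_x + pad_right_x; // width  -> 10
    shape[2] = shape[2] * block_y + pad_left_y + pad_right_y; // height -> 10
    shape[3] = shape[3] / (block_x * block_y);                // batch  -> 2
    std::printf("(%d, %d, %d, %d)\n", shape[0], shape[1], shape[2], shape[3]); // (16, 10, 10, 2)
    return 0;
}

The channel dimension is left untouched, which matches the channel-equality check the kernel's validation enforces.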
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 1bc1036bed..a7a95b8b94 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -374,8 +374,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "softmax_layer_max_shift_exp_sum_quantized_serial", "softmax_layer_quantized.cl" },
{ "softmax_layer_max_shift_exp_sum_quantized_parallel", "softmax_layer_quantized.cl" },
{ "softmax_layer_max_shift_exp_sum_serial", "softmax_layer.cl" },
- { "space_to_batch", "space_to_batch.cl" },
- { "space_to_batch_static", "space_to_batch.cl" },
+ { "space_to_batch_nchw", "space_to_batch.cl" },
+ { "space_to_batch_static_nchw", "space_to_batch.cl" },
+ { "space_to_batch_nhwc", "space_to_batch.cl" },
+ { "space_to_batch_static_nhwc", "space_to_batch.cl" },
{ "softmax_layer_max_shift_exp_sum_parallel", "softmax_layer.cl" },
{ "strided_slice", "slice_ops.cl" },
{ "suppress_non_maximum", "canny.cl" },
diff --git a/src/core/CL/cl_kernels/space_to_batch.cl b/src/core/CL/cl_kernels/space_to_batch.cl
index 1343695ed1..1d641cc0f4 100644
--- a/src/core/CL/cl_kernels/space_to_batch.cl
+++ b/src/core/CL/cl_kernels/space_to_batch.cl
@@ -59,7 +59,7 @@
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
*/
-__kernel void space_to_batch(
+__kernel void space_to_batch_nchw(
TENSOR4D_DECLARATION(input),
IMAGE_DECLARATION(paddings),
VECTOR_DECLARATION(block_shape),
@@ -92,6 +92,74 @@ __kernel void space_to_batch(
*((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, w));
}
}
+/** Calculate the space to batch conversion. (NHWC)
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The block shape tensor rank must be passed at compile time using -DBLOCK_SHAPE_DIM. e.g. -DBLOCK_SHAPE_DIM=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source image
+ * @param[in] paddings_ptr Pointer to the second source image. Supported data types: S32
+ * @param[in] paddings_stride_x Stride of the paddings tensor in X dimension (in bytes)
+ * @param[in] paddings_step_x paddings_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] paddings_stride_y Stride of the paddings tensor in Y dimension (in bytes)
+ * @param[in] paddings_step_y paddings_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] paddings_offset_first_element_in_bytes The offset of the first element in the second source image
+ * @param[in] block_shape_ptr Pointer to the block shape tensor. Supported data types: S32
+ * @param[in] block_shape_stride_x Stride of the block shape tensor in X dimension (in bytes)
+ * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] block_shape_stride_y Stride of the block shape tensor in Y dimension (in bytes)
+ * @param[in] block_shape_step_y block_shape_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] block_shape_offset_first_element_in_bytes The offset of the first element in the block shape tensor
+ * @param[in] batch_id The output tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void space_to_batch_nhwc(
+ TENSOR4D_DECLARATION(input),
+ IMAGE_DECLARATION(paddings),
+ VECTOR_DECLARATION(block_shape),
+ const int batch_id,
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Image pad = CONVERT_TO_IMAGE_STRUCT_NO_STEP(paddings);
+ Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ const int PAD_LEFT_X = *((__global int *)offset(&pad, 0, 0));
+ const int PAD_RIGHT_X = *((__global int *)offset(&pad, 1, 0));
+ const int PAD_LEFT_Y = *((__global int *)offset(&pad, 0, 1));
+ const int PAD_RIGHT_Y = *((__global int *)offset(&pad, 1, 1));
+
+ int block_x = *((__global int *)vector_offset(&block, 0));
+ int block_y = *((__global int *)vector_offset(&block, 1));
+
+ const int out_x = get_global_id(1);
+ const int out_y = get_global_id(2);
+ const int z = get_global_id(0);
+
+ if((out_x >= PAD_LEFT_X && out_x < WIDTH_OUT - PAD_RIGHT_X) && (out_y >= PAD_LEFT_Y && out_y < HEIGHT_OUT - PAD_RIGHT_Y))
+ {
+ const int r = (BATCH_SIZE / (block_x * block_y));
+ const int w = batch_id % r;
+ const int in_x = (out_x - PAD_LEFT_X) * block_x + (batch_id / r) % block_x;
+ const int in_y = (out_y - PAD_LEFT_Y) * block_y + (batch_id / r) / block_x;
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, w));
+ }
+}
#endif // defined(BATCH_SIZE) && defined(DATA_TYPE)
#if defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) && defined(PAD_LEFT_X) && defined(PAD_RIGHT_X) && defined(PAD_LEFT_Y) && defined(PAD_RIGHT_Y)
@@ -124,7 +192,7 @@ __kernel void space_to_batch(
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
*/
-__kernel void space_to_batch_static(
+__kernel void space_to_batch_static_nchw(
TENSOR4D_DECLARATION(input),
const int batch_id,
TENSOR3D_DECLARATION(output))
@@ -148,4 +216,57 @@ __kernel void space_to_batch_static(
*((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, in_x, in_y, z, w));
}
}
+/** Calculate the space to batch conversion. (NHWC)
+ *
+ * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float
+ * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2
+ * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2
+ * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2
+ * @note The starting pad value of x must be passed at compile time using -DPAD_LEFT_X. e.g. -DPAD_LEFT_X=2
+ * @note The ending pad value of x must be passed at compile time using -DPAD_RIGHT_X. e.g. -DPAD_RIGHT_X=2
+ * @note The starting pad value of y must be passed at compile time using -DPAD_LEFT_Y. e.g. -DPAD_LEFT_Y=2
+ * @note The ending pad value of y must be passed at compile time using -DPAD_RIGHT_Y. e.g. -DPAD_RIGHT_Y=2
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source image
+ * @param[in] batch_id The output tensor batch id
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination image
+ */
+__kernel void space_to_batch_static_nhwc(
+ TENSOR4D_DECLARATION(input),
+ const int batch_id,
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+
+ int block_x = BLOCK_SHAPE_X;
+ int block_y = BLOCK_SHAPE_Y;
+
+ const int out_x = get_global_id(1);
+ const int out_y = get_global_id(2);
+ const int z = get_global_id(0);
+
+ if((out_x >= PAD_LEFT_X && out_x < WIDTH_OUT - PAD_RIGHT_X) && (out_y >= PAD_LEFT_Y && out_y < HEIGHT_OUT - PAD_RIGHT_Y))
+ {
+ const int r = (BATCH_SIZE / (block_x * block_y));
+ const int w = batch_id % r;
+ const int in_x = (out_x - PAD_LEFT_X) * block_x + (batch_id / r) % block_x;
+ const int in_y = (out_y - PAD_LEFT_Y) * block_y + (batch_id / r) / block_x;
+ *((__global DATA_TYPE *)out.ptr) = *((__global DATA_TYPE *)tensor4D_offset(&in, z, in_x, in_y, w));
+ }
+}
#endif // defined(BATCH_SIZE) && defined(DATA_TYPE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) && defined(PAD_LEFT_X) && defined(PAD_RIGHT_X) && defined(PAD_LEFT_Y) && defined(PAD_RIGHT_Y)
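
Both NHWC kernels above share the same batch/offset decomposition. A host-side C++ transcription of that index math follows; the struct and function names are mine, for illustration only:

// For output element (z, out_x, out_y) of output batch `batch_id`, the
// kernels read input element (z, in_x, in_y) of input batch `w`.
struct SrcCoord { int w, in_x, in_y; };

SrcCoord space_to_batch_src(int batch_id, int out_x, int out_y,
                            int batch_size, int block_x, int block_y,
                            int pad_left_x, int pad_left_y)
{
    const int r = batch_size / (block_x * block_y); // BATCH_SIZE is the -DBATCH_SIZE build option
    SrcCoord s;
    s.w    = batch_id % r;                                      // source batch
    s.in_x = (out_x - pad_left_x) * block_x + (batch_id / r) % block_x;
    s.in_y = (out_y - pad_left_y) * block_y + (batch_id / r) / block_x;
    return s;
}
// Elements whose (out_x, out_y) fall inside the pad borders are skipped by
// the kernels' bounds check and keep whatever the output was initialised to.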
diff --git a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
index cda6e96806..9e4010e6c6 100644
--- a/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSpaceToBatchLayerKernel.cpp
@@ -58,11 +58,16 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape
// Validate output if initialized
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[0] < padding_left.x() + padding_right.y());
- ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[0] / block_shape_x != (output->tensor_shape()[0] - padding_left.x() - padding_right.y()));
- ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[1] / block_shape_y != (output->tensor_shape()[1] - padding_left.x() - padding_right.y()));
- ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[2] != output->tensor_shape()[2]);
- ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[3] % (block_shape_x * block_shape_y) != 0);
+ const DataLayout data_layout = input->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_width] < padding_left.x() + padding_right.y());
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_width] / block_shape_x != (output->tensor_shape()[idx_width] - padding_left.x() - padding_right.y()));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_height] / block_shape_y != (output->tensor_shape()[idx_height] - padding_left.x() - padding_right.y()));
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_batch] % (block_shape_x * block_shape_y) != 0);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
}
@@ -85,13 +90,18 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const ICLTenso
_paddings = paddings;
_output = output;
+ const DataLayout data_layout = input->info()->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DWIDTH_OUT=" + support::cpp11::to_string(output->info()->dimension(0)));
- build_opts.add_option("-DHEIGHT_OUT=" + support::cpp11::to_string(output->info()->dimension(1)));
- build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(3)));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_batch", build_opts.options()));
+ build_opts.add_option("-DWIDTH_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
+ build_opts.add_option("-DHEIGHT_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_height)));
+ build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_batch)));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_batch_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
@@ -111,19 +121,24 @@ void CLSpaceToBatchLayerKernel::configure(const ICLTensor *input, const int bloc
_input = input;
_output = output;
+ const DataLayout data_layout = input->info()->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+
// Create kernel
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.add_option("-DWIDTH_OUT=" + support::cpp11::to_string(output->info()->dimension(0)));
- build_opts.add_option("-DHEIGHT_OUT=" + support::cpp11::to_string(output->info()->dimension(1)));
- build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(3)));
+ build_opts.add_option("-DWIDTH_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_width)));
+ build_opts.add_option("-DHEIGHT_OUT=" + support::cpp11::to_string(output->info()->dimension(idx_height)));
+ build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(output->info()->dimension(idx_batch)));
build_opts.add_option("-DBLOCK_SHAPE_X=" + support::cpp11::to_string(block_shape_x));
build_opts.add_option("-DBLOCK_SHAPE_Y=" + support::cpp11::to_string(block_shape_y));
build_opts.add_option("-DPAD_START_X=" + support::cpp11::to_string(padding_left.x()));
build_opts.add_option("-DPAD_END_X=" + support::cpp11::to_string(padding_right.x()));
build_opts.add_option("-DPAD_START_Y=" + support::cpp11::to_string(padding_left.y()));
build_opts.add_option("-DPAD_END_Y=" + support::cpp11::to_string(padding_right.y()));
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_batch_static", build_opts.options()));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("space_to_batch_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
// Configure kernel window
Window win = calculate_max_window(*output->info(), Steps());
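
Rather than branching on layout, configure() now derives the kernel name from the data layout and lets CLKernelLibrary resolve it against the four map entries added above. A standalone sketch of that name composition, assuming string_from_data_layout() yields "NCHW"/"NHWC":

#include <algorithm>
#include <cctype>
#include <string>

// Illustration only: compose the kernel name the same way configure() does.
std::string space_to_batch_kernel_name(const std::string &layout /* "NCHW" or "NHWC" */,
                                       bool is_static)
{
    std::string name = is_static ? "space_to_batch_static_" : "space_to_batch_";
    std::string lower = layout;
    std::transform(lower.begin(), lower.end(), lower.begin(),
                   [](unsigned char c) { return std::tolower(c); });
    return name + lower; // e.g. "space_to_batch_nhwc"
}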
diff --git a/tests/validation/CL/SpaceToBatchLayer.cpp b/tests/validation/CL/SpaceToBatchLayer.cpp
index 63d4022d08..1c3d54dc81 100644
--- a/tests/validation/CL/SpaceToBatchLayer.cpp
+++ b/tests/validation/CL/SpaceToBatchLayer.cpp
@@ -124,14 +124,16 @@ DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
TEST_SUITE(Float)
TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
- DataType::F32)))
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference);
@@ -139,14 +141,16 @@ FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<float>, framework::Data
TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Small, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference);
}
-FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
- DataType::F16)))
+FIXTURE_DATA_TEST_CASE(Large, CLSpaceToBatchLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToBatchLayerDataset(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference);
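
The nested combine() adds a DataLayout axis to each dataset, so every existing shape/data-type case now runs once per layout. Conceptually it is a cross product; a generic sketch (not the framework's implementation):

#include <utility>
#include <vector>

// For datasets A and B, combine(A, B) yields every pair (a, b); nesting a
// third combine adds the DataLayout axis the same way.
template <typename A, typename B>
std::vector<std::pair<A, B>> combine(const std::vector<A> &as, const std::vector<B> &bs)
{
    std::vector<std::pair<A, B>> out;
    for(const auto &a : as)
        for(const auto &b : bs)
            out.emplace_back(a, b);
    return out;
}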
diff --git a/tests/validation/fixtures/SpaceToBatchFixture.h b/tests/validation/fixtures/SpaceToBatchFixture.h
index 26c24c1331..a304162998 100644
--- a/tests/validation/fixtures/SpaceToBatchFixture.h
+++ b/tests/validation/fixtures/SpaceToBatchFixture.h
@@ -40,9 +40,9 @@ class SpaceToBatchLayerValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type)
+ void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape paddings_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout)
{
- _target = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type);
+ _target = compute_target(input_shape, block_shape_shape, paddings_shape, output_shape, data_type, data_layout);
_reference = compute_reference(input_shape, block_shape_shape, paddings_shape, output_shape, data_type);
}
@@ -59,14 +59,20 @@ protected:
std::uniform_int_distribution<> distribution(0, 0);
library->fill(tensor, distribution, i);
}
- TensorType compute_target(const TensorShape &input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape, const TensorShape &output_shape,
- DataType data_type)
+ TensorType compute_target(TensorShape input_shape, const TensorShape &block_shape_shape, const TensorShape &paddings_shape, TensorShape output_shape,
+ DataType data_type, DataLayout data_layout)
{
+ if(data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ }
+
// Create tensors
- TensorType input = create_tensor<TensorType>(input_shape, data_type);
+ TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32);
TensorType paddings = create_tensor<TensorType>(paddings_shape, DataType::S32);
- TensorType output = create_tensor<TensorType>(output_shape, data_type);
+ TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
// Create and configure function
FunctionType space_to_batch;
@@ -92,10 +98,11 @@ protected:
fill(AccessorType(input), 0);
fill_pad(AccessorType(paddings), 0);
{
- auto block_shape_data = AccessorType(block_shape);
+ auto block_shape_data = AccessorType(block_shape);
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
for(unsigned int i = 0; i < block_shape_shape.x(); ++i)
{
- static_cast<int32_t *>(block_shape_data.data())[i] = input_shape[i] / output_shape[i];
+ static_cast<int32_t *>(block_shape_data.data())[i] = input_shape[i + idx_width] / output_shape[i + idx_width];
}
}
// Compute function
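
The fixture converts the dataset's NCHW shapes with PermutationVector(2U, 0U, 1U) before creating tensors. A self-contained sketch of that permutation under gather semantics (dst[i] = src[perm[i]] — an assumption consistent with the (W, H, C) -> (C, W, H) result the fixture relies on):

#include <array>
#include <cstdio>

std::array<unsigned, 3> permute3(std::array<unsigned, 3> src, std::array<unsigned, 3> perm)
{
    return { src[perm[0]], src[perm[1]], src[perm[2]] };
}

int main()
{
    // NCHW shape in dimension order 0..2 is (W, H, C); e.g. W=4, H=4, C=16.
    auto nhwc = permute3({ 4, 4, 16 }, { 2, 0, 1 }); // -> (16, 4, 4) = (C, W, H)
    std::printf("%u %u %u\n", nhwc[0], nhwc[1], nhwc[2]);
    return 0;
}

With the shapes permuted, idx_width shifts from 0 (NCHW) to 1 (NHWC), which is why the block-shape fill above indexes input_shape[i + idx_width] rather than input_shape[i].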