diff options
-rw-r--r-- | arm_compute/core/utils/misc/ShapeCalculator.h | 12 | ||||
-rw-r--r-- | src/core/CL/CLKernelLibrary.cpp | 6 | ||||
-rw-r--r-- | src/core/CL/cl_kernels/batch_to_space.cl | 109 | ||||
-rw-r--r-- | src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp | 25 | ||||
-rw-r--r-- | tests/validation/CL/BatchToSpaceLayer.cpp | 20 | ||||
-rw-r--r-- | tests/validation/fixtures/BatchToSpaceLayerFixture.h | 21 |
6 files changed, 161 insertions, 32 deletions
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h index 806149f83f..4ae97f7c1f 100644 --- a/arm_compute/core/utils/misc/ShapeCalculator.h +++ b/arm_compute/core/utils/misc/ShapeCalculator.h @@ -524,10 +524,16 @@ inline TensorShape compute_strided_slice_shape(const ITensorInfo &input, inline TensorShape compute_batch_to_space_shape(const ITensorInfo *input, const int block_x, const int block_y) { ARM_COMPUTE_ERROR_ON(block_x <= 0 || block_y <= 0); + + const DataLayout data_layout = input->data_layout(); + const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); + const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES); + TensorShape output_shape{ input->tensor_shape() }; - output_shape.set(0, input->tensor_shape()[0] * block_x); - output_shape.set(1, input->tensor_shape()[1] * block_y); - output_shape.set(3, input->tensor_shape()[3] / (block_x * block_y)); + output_shape.set(idx_width, input->tensor_shape()[idx_width] * block_x); + output_shape.set(idx_height, input->tensor_shape()[idx_height] * block_y); + output_shape.set(idx_batch, input->tensor_shape()[idx_batch] / (block_x * block_y)); return output_shape; } diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp index fa164542e4..8f5e81bae9 100644 --- a/src/core/CL/CLKernelLibrary.cpp +++ b/src/core/CL/CLKernelLibrary.cpp @@ -153,8 +153,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map = { "arithmetic_add", "arithmetic_op.cl" }, { "arithmetic_sub", "arithmetic_op.cl" }, { "arithmetic_div", "arithmetic_op.cl" }, - { "batch_to_space", "batch_to_space.cl" }, - { "batch_to_space_static", "batch_to_space.cl" }, + { "batch_to_space_nchw", "batch_to_space.cl" }, + { "batch_to_space_static_nchw", "batch_to_space.cl" }, + { 
"batch_to_space_nhwc", "batch_to_space.cl" }, + { "batch_to_space_static_nhwc", "batch_to_space.cl" }, { "batchnormalization_layer_nchw", "batchnormalization_layer.cl" }, { "batchnormalization_layer_nhwc", "batchnormalization_layer.cl" }, { "bitwise_or", "bitwise_op.cl" }, diff --git a/src/core/CL/cl_kernels/batch_to_space.cl b/src/core/CL/cl_kernels/batch_to_space.cl index 3043c2cf17..8506fc3709 100644 --- a/src/core/CL/cl_kernels/batch_to_space.cl +++ b/src/core/CL/cl_kernels/batch_to_space.cl @@ -24,7 +24,7 @@ #include "helpers.h" #if defined(DATA_TYPE) && defined(BATCH_SIZE) -/** Batch to space transformation. +/** Batch to space transformation. (NCHW) * * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float @@ -54,7 +54,7 @@ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor */ -__kernel void batch_to_space( +__kernel void batch_to_space_nchw( TENSOR3D_DECLARATION(input), const int batch_id, VECTOR_DECLARATION(block_shape), @@ -78,10 +78,64 @@ __kernel void batch_to_space( *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, w)) = *((__global DATA_TYPE *)in.ptr); } +/** Batch to space transformation. (NHWC) + * + * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float + * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float + * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2 + * + * @param[in] input_ptr Pointer to the source tensor. 
Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor + * @param[in] batch_id The input tensor batch id + * @param[in] block_shape_ptr Pointer to the source tensor. Supported data types: S32 + * @param[in] block_shape_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] block_shape_step_x block_shape_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] block_shape_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] block_shape_step_y block_shape_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor + * @param[out] output_ptr Pointer to the destination tensor. 
Supported data types: same as @p input_ptr + * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor + */ +__kernel void batch_to_space_nhwc( + TENSOR3D_DECLARATION(input), + const int batch_id, + VECTOR_DECLARATION(block_shape), + TENSOR4D_DECLARATION(output)) +{ + Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input); + Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0); + Vector block = CONVERT_TO_VECTOR_STRUCT_NO_STEP(block_shape); + + const int block_x = *((__global int *)vector_offset(&block, 0)); + const int block_y = *((__global int *)vector_offset(&block, 1)); + + const int r = (BATCH_SIZE / (block_x * block_y)); + const int x = get_global_id(1); + const int y = get_global_id(2); + const int z = get_global_id(0); + const int w = batch_id % r; + + const int out_x = x * block_x + (batch_id / r) % block_x; + const int out_y = y * block_y + (batch_id / r) / block_x; + + *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, w)) = *((__global DATA_TYPE *)in.ptr); +} #endif // defined(DATA_TYPE) && defined(BATCH_SIZE) #if defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y) -/** Batch to space transformation. +/** Batch to space transformation. (NCHW) * * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. 
-DDATA_TYPE=float * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2 @@ -106,7 +160,7 @@ __kernel void batch_to_space( * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor */ -__kernel void batch_to_space_static( +__kernel void batch_to_space_static_nchw( TENSOR3D_DECLARATION(input), const int batch_id, TENSOR4D_DECLARATION(output)) @@ -128,4 +182,51 @@ __kernel void batch_to_space_static( *((__global DATA_TYPE *)tensor4D_offset(&out, out_x, out_y, z, w)) = *((__global DATA_TYPE *)in.ptr); } +/** Batch to space transformation. (NHWC) + * + * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=float + * @note The input tensor batch size must be passed at compile time using -DBATCH_SIZE. e.g. -DBATCH_SIZE=2 + * @note The block shape x must be passed at compile time using -DBLOCK_SHAPE_X. e.g. -DBLOCK_SHAPE_X=2 + * @note The block shape y must be passed at compile time using -DBLOCK_SHAPE_Y. e.g. -DBLOCK_SHAPE_Y=2 + * + * @param[in] input_ptr Pointer to the source tensor. 
Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32 + * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes) + * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes) + * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor + * @param[in] batch_id The input tensor batch id + * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr + * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes) + * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes) + * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes) + * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes) + * @param[in] output_stride_z Stride of the source tensor in Z dimension (in bytes) + * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes) + * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor + */ +__kernel void batch_to_space_static_nhwc( + TENSOR3D_DECLARATION(input), + const int batch_id, + TENSOR4D_DECLARATION(output)) +{ + Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input); + Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0); + + const int block_x = BLOCK_SHAPE_X; + const int block_y = BLOCK_SHAPE_Y; + + const int r = (BATCH_SIZE / (block_x * block_y)); + const int x = 
get_global_id(1); + const int y = get_global_id(2); + const int z = get_global_id(0); + const int w = batch_id % r; + + const int out_x = x * block_x + (batch_id / r) % block_x; + const int out_y = y * block_y + (batch_id / r) / block_x; + + *((__global DATA_TYPE *)tensor4D_offset(&out, z, out_x, out_y, w)) = *((__global DATA_TYPE *)in.ptr); +} #endif // defined(DATA_TYPE) && defined(BATCH_SIZE) && defined(BLOCK_SHAPE_X) && defined(BLOCK_SHAPE_Y)
\ No newline at end of file diff --git a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp index e08d6f6ec5..8f56f66845 100644 --- a/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp +++ b/src/core/CL/kernels/CLBatchToSpaceLayerKernel.cpp @@ -58,10 +58,15 @@ Status validate_arguments_static(const ITensorInfo *input, const int block_shape // Validate output if initialized if(output->total_size() != 0) { - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[0] != (block_shape_x * output->tensor_shape()[0])); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[1] != (block_shape_x * output->tensor_shape()[1])); - ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[2] != output->tensor_shape()[2]); - ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[3] % (block_shape_x * block_shape_y) != 0); + const DataLayout data_layout = input->data_layout(); + const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); + const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES); + ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_width] != (block_shape_x * output->tensor_shape()[idx_width])); + ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_height] != (block_shape_y * output->tensor_shape()[idx_height])); + ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_channel] != output->tensor_shape()[idx_channel]); + ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_batch] % (block_shape_x * block_shape_y) != 0); ARM_COMPUTE_RETURN_ERROR_ON(output->num_dimensions() > 4); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output); } @@ -84,12 +89,14 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const ICLTenso _block_shape 
= block_shape; _output = output; + const int idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); + // Create kernel CLBuildOptions build_opts; build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3))); - build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(0))); - _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batch_to_space", build_opts.options())); + build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width))); + _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batch_to_space_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options())); // Configure kernel window Window win = calculate_max_window(*input->info(), Steps()); @@ -108,14 +115,16 @@ void CLBatchToSpaceLayerKernel::configure(const ICLTensor *input, const int32_t _input = input; _output = output; + const int idx_width = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::WIDTH); + // Create kernel CLBuildOptions build_opts; build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())); build_opts.add_option("-DBATCH_SIZE=" + support::cpp11::to_string(input->info()->dimension(3))); build_opts.add_option("-DBLOCK_SHAPE_X=" + support::cpp11::to_string(block_shape_x)); build_opts.add_option("-DBLOCK_SHAPE_Y=" + support::cpp11::to_string(block_shape_y)); - build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(0))); - _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batch_to_space_static", build_opts.options())); + build_opts.add_option("-DWIDTH_IN=" + support::cpp11::to_string(input->info()->dimension(idx_width))); + _kernel = 
static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batch_to_space_static_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options())); // Configure kernel window Window win = calculate_max_window(*input->info(), Steps()); diff --git a/tests/validation/CL/BatchToSpaceLayer.cpp b/tests/validation/CL/BatchToSpaceLayer.cpp index 79fee145db..db96571f1d 100644 --- a/tests/validation/CL/BatchToSpaceLayer.cpp +++ b/tests/validation/CL/BatchToSpaceLayer.cpp @@ -114,14 +114,16 @@ DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(zip( TEST_SUITE(Float) TEST_SUITE(FP32) -FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchToSpaceLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallBatchToSpaceLayerDataset(), framework::dataset::make("DataType", - DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchToSpaceLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallBatchToSpaceLayerDataset(), framework::dataset::make("DataType", + DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType", - DataType::F32))) +FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType", + DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(CLAccessor(_target), _reference); @@ -129,14 +131,16 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<float>, framework::D TEST_SUITE_END() TEST_SUITE(FP16) -FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchToSpaceLayerFixture<half>, 
framework::DatasetMode::PRECOMMIT, combine(datasets::SmallBatchToSpaceLayerDataset(), framework::dataset::make("DataType", - DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchToSpaceLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallBatchToSpaceLayerDataset(), framework::dataset::make("DataType", + DataType::F16)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(CLAccessor(_target), _reference); } -FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType", - DataType::F16))) +FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchToSpaceLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeBatchToSpaceLayerDataset(), framework::dataset::make("DataType", + DataType::F16)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) { // Validate output validate(CLAccessor(_target), _reference); diff --git a/tests/validation/fixtures/BatchToSpaceLayerFixture.h b/tests/validation/fixtures/BatchToSpaceLayerFixture.h index f124fecb9d..973f2ed27b 100644 --- a/tests/validation/fixtures/BatchToSpaceLayerFixture.h +++ b/tests/validation/fixtures/BatchToSpaceLayerFixture.h @@ -40,9 +40,9 @@ class BatchToSpaceLayerValidationFixture : public framework::Fixture { public: template <typename...> - void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type) + void setup(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, DataType data_type, DataLayout data_layout) { - _target = compute_target(input_shape, block_shape_shape, output_shape, data_type); + _target = compute_target(input_shape, block_shape_shape, output_shape, data_type, data_layout); _reference = compute_reference(input_shape, block_shape_shape, output_shape, 
data_type); } @@ -53,13 +53,19 @@ protected: std::uniform_real_distribution<> distribution(-1.0f, 1.0f); library->fill(tensor, distribution, i); } - TensorType compute_target(const TensorShape &input_shape, const TensorShape &block_shape_shape, const TensorShape &output_shape, - DataType data_type) + TensorType compute_target(TensorShape input_shape, TensorShape block_shape_shape, TensorShape output_shape, + DataType data_type, DataLayout data_layout) { + if(data_layout == DataLayout::NHWC) + { + permute(input_shape, PermutationVector(2U, 0U, 1U)); + permute(output_shape, PermutationVector(2U, 0U, 1U)); + } + // Create tensors - TensorType input = create_tensor<TensorType>(input_shape, data_type); + TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout); TensorType block_shape = create_tensor<TensorType>(block_shape_shape, DataType::S32); - TensorType output = create_tensor<TensorType>(output_shape, data_type); + TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout); // Create and configure function FunctionType batch_to_space; @@ -82,9 +88,10 @@ protected: fill(AccessorType(input), 0); { auto block_shape_data = AccessorType(block_shape); + const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); for(unsigned int i = 0; i < block_shape_shape.x(); ++i) { - static_cast<int32_t *>(block_shape_data.data())[i] = output_shape[i] / input_shape[i]; + static_cast<int32_t *>(block_shape_data.data())[i] = output_shape[i + idx_width] / input_shape[i + idx_width]; } } // Compute function |