author    Manuel Bottini <manuel.bottini@arm.com>  2019-05-17 14:04:22 +0100
committer Manuel Bottini <manuel.bottini@arm.com>  2019-05-29 14:58:16 +0000
commit    5b7d537d918becb894d94d91726ce79e63d72fc1 (patch)
tree      e2375032d20cae479d026061a075ca63800d4532
parent    088d7b4c6b9bde97b3b7f83ebce047377c810997 (diff)
download  ComputeLibrary-5b7d537d918becb894d94d91726ce79e63d72fc1.tar.gz
COMPMID-2237
Implement SPACE_TO_DEPTH for NEON

Change-Id: I9f427bceca6da52671e0096be08772612f4be152
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1227
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
-rw-r--r--  arm_compute/core/NEON/NEKernels.h                             1
-rw-r--r--  arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h    80
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h                23
-rw-r--r--  arm_compute/runtime/NEON/NEFunctions.h                        1
-rw-r--r--  arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h    80
-rw-r--r--  src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp           6
-rw-r--r--  src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp         153
-rw-r--r--  src/runtime/NEON/functions/NESpaceToDepthLayer.cpp           56
-rw-r--r--  tests/datasets/SpaceToDepthDataset.h                        135
-rw-r--r--  tests/validation/NEON/SpaceToDepthLayer.cpp                 113
-rw-r--r--  tests/validation/fixtures/SpaceToDepthFixture.h             110
-rw-r--r--  tests/validation/reference/SpaceToDepth.cpp                  84
-rw-r--r--  tests/validation/reference/SpaceToDepth.h                    44
13 files changed, 880 insertions(+), 6 deletions(-)
diff --git a/arm_compute/core/NEON/NEKernels.h b/arm_compute/core/NEON/NEKernels.h
index d19b0c71ba..119f9b8702 100644
--- a/arm_compute/core/NEON/NEKernels.h
+++ b/arm_compute/core/NEON/NEKernels.h
@@ -129,6 +129,7 @@
#include "arm_compute/core/NEON/kernels/NESobel7x7Kernel.h"
#include "arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
+#include "arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEStackLayerKernel.h"
#include "arm_compute/core/NEON/kernels/NEStridedSliceKernel.h"
#include "arm_compute/core/NEON/kernels/NETableLookupKernel.h"
diff --git a/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h b/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h
new file mode 100644
index 0000000000..c9ecdd26f8
--- /dev/null
+++ b/arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESPACETODEPTHLAYERKERNEL_H__
+#define __ARM_COMPUTE_NESPACETODEPTHLAYERKERNEL_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Interface for the space to depth kernel */
+class NESpaceToDepthLayerKernel : public INEKernel
+{
+public:
+ const char *name() const override
+ {
+ return "NESpaceToDepthLayerKernel";
+ }
+ /** Default constructor */
+ NESpaceToDepthLayerKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NESpaceToDepthLayerKernel(const NESpaceToDepthLayerKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NESpaceToDepthLayerKernel &operator=(const NESpaceToDepthLayerKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ NESpaceToDepthLayerKernel(NESpaceToDepthLayerKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ NESpaceToDepthLayerKernel &operator=(NESpaceToDepthLayerKernel &&) = default;
+ /** Default destructor */
+ ~NESpaceToDepthLayerKernel() = default;
+ /** Initialise the kernel's inputs and output.
+ *
+ * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] block_shape Block shape value
+ */
+ void configure(const ITensor *input, ITensor *output, int32_t block_shape);
+ /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToDepthLayerKernel
+ *
+ * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] output Tensor output info. Data types supported: same as @p input
+ * @param[in] block_shape Block shape value
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ const ITensor *_input; /**< Source tensor */
+ ITensor *_output; /**< Destination tensor */
+ int32_t _block_shape; /**< Block shape */
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NESPACETODEPTHLAYERKERNEL_H__ */
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 8394bbaed8..7eab17ba11 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -1058,6 +1058,29 @@ inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const
return output_shape;
}
+/** Calculate the space to depth output shape of a tensor
+ *
+ * @param[in] input Input tensor info
+ * @param[in] block_shape Block shape value
+ *
+ * @return the calculated shape
+ */
+inline TensorShape compute_space_to_depth_shape(const ITensorInfo *input, int32_t block_shape)
+{
+ TensorShape output_shape{ input->tensor_shape() };
+
+ const DataLayout data_layout = input->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_depth = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+
+ output_shape.set(idx_width, input->tensor_shape()[idx_width] / block_shape);
+ output_shape.set(idx_height, input->tensor_shape()[idx_height] / block_shape);
+ output_shape.set(idx_depth, input->tensor_shape()[idx_depth] * (block_shape * block_shape));
+
+ return output_shape;
+}
+
/** Calculate the prior box output shape of a tensor
*
* @param[in] input Input tensor info
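As a quick sanity check of the shape rule above: width and height shrink by the block size while the channel count grows by its square, so the total element count is preserved. A standalone sketch (hypothetical helper types, not part of the patch):

    #include <cassert>

    // Standalone sketch of the space-to-depth shape rule (illustrative only).
    struct Shape { int w, h, c, n; };

    Shape space_to_depth_shape(Shape in, int block)
    {
        assert(in.w % block == 0 && in.h % block == 0);
        return Shape{ in.w / block, in.h / block, in.c * block * block, in.n };
    }

    int main()
    {
        // (W=4, H=6, C=3, N=1) with block_shape = 2 -> (2, 3, 12, 1); 72 elements either way
        const Shape out = space_to_depth_shape(Shape{ 4, 6, 3, 1 }, 2);
        assert(out.w == 2 && out.h == 3 && out.c == 12 && out.n == 1);
    }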
diff --git a/arm_compute/runtime/NEON/NEFunctions.h b/arm_compute/runtime/NEON/NEFunctions.h
index 971ddfa703..4e0cdd7a0a 100644
--- a/arm_compute/runtime/NEON/NEFunctions.h
+++ b/arm_compute/runtime/NEON/NEFunctions.h
@@ -132,6 +132,7 @@
#include "arm_compute/runtime/NEON/functions/NESobel7x7.h"
#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
#include "arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h"
+#include "arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h"
#include "arm_compute/runtime/NEON/functions/NESplit.h"
#include "arm_compute/runtime/NEON/functions/NEStackLayer.h"
#include "arm_compute/runtime/NEON/functions/NEStridedSlice.h"
diff --git a/arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h b/arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h
new file mode 100644
index 0000000000..abcfed44b8
--- /dev/null
+++ b/arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NESPACETODEPTHLAYER_H__
+#define __ARM_COMPUTE_NESPACETODEPTHLAYER_H__
+
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/NEON/kernels/NEMemsetKernel.h"
+#include "arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** This function calls the following NEON kernels/functions:
+ *
+ * -# @ref NESpaceToDepthLayerKernel
+ */
+class NESpaceToDepthLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ NESpaceToDepthLayer();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NESpaceToDepthLayer(const NESpaceToDepthLayer &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NESpaceToDepthLayer &operator=(const NESpaceToDepthLayer &) = delete;
+ /** Allow instances of this class to be moved */
+ NESpaceToDepthLayer(NESpaceToDepthLayer &&) = default;
+ /** Allow instances of this class to be moved */
+ NESpaceToDepthLayer &operator=(NESpaceToDepthLayer &&) = default;
+ /** Default destructor */
+ virtual ~NESpaceToDepthLayer() = default;
+ /** Set the input and output tensors.
+ *
+ * @param[in] input Tensor input. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[out] output Tensor output. Data types supported: same as @p input
+ * @param[in] block_shape Block shape value
+ */
+ void configure(const ITensor *input, ITensor *output, int32_t block_shape);
+ /** Static function to check if given info will lead to a valid configuration of @ref NESpaceToDepthLayer
+ *
+ * @param[in] input Tensor input info. Supported tensor rank: 4. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+ * @param[in] output Tensor output info. Data types supported: same as @p input
+ * @param[in] block_shape Block shape value
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ NESpaceToDepthLayerKernel _space_to_depth_kernel; /**< SpaceToDepth kernel to run */
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NESPACETODEPTHLAYER_H__ */
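A minimal usage sketch of the new function, following the same configure/allocate/run pattern the validation fixture below uses; shapes and data type are illustrative:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // Illustrative NCHW input: 8x8 spatial, 3 channels, 1 batch, block_shape = 2
        Tensor input{}, output{};
        input.allocator()->init(TensorInfo(TensorShape(8U, 8U, 3U, 1U), 1, DataType::F32));

        NESpaceToDepthLayer space_to_depth;
        space_to_depth.configure(&input, &output, 2); // output info is auto-initialised

        input.allocator()->allocate();
        output.allocator()->allocate();

        // ... fill input ...
        space_to_depth.run(); // output is now 4x4x12x1
        return 0;
    }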
diff --git a/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp b/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp
index 2e46b149e3..511a109777 100644
--- a/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp
+++ b/src/core/NEON/kernels/NESpaceToBatchLayerKernel.cpp
@@ -167,12 +167,6 @@ void NESpaceToBatchLayerKernel::run(const Window &window, const ThreadInfo &info
const size_t batch_size = _input->info()->dimension(3);
Window slice_out = window.first_slice_window_3D();
- Window slice_in = window.first_slice_window_4D();
-
- slice_in.set(Window::DimX, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimY, Window::Dimension(0, 0, 0));
- slice_in.set(Window::DimZ, Window::Dimension(0, 0, 0));
- slice_in.set(3, Window::Dimension(0, 0, 0));
int batch_id = 0;
diff --git a/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp b/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp
new file mode 100644
index 0000000000..4803365013
--- /dev/null
+++ b/src/core/NEON/kernels/NESpaceToDepthLayerKernel.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NESpaceToDepthLayerKernel.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include <arm_neon.h>
+#include <cstdint>
+
+using namespace arm_compute::misc::shape_calculator;
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+
+ ARM_COMPUTE_RETURN_ERROR_ON(block_shape < 1);
+
+ // Validate output if initialized
+ if(output->total_size() != 0)
+ {
+ const DataLayout data_layout = input->data_layout();
+ const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
+ const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ const int idx_channel = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const int idx_batch = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_width] % block_shape != 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_height] % block_shape != 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape()[idx_batch] != output->tensor_shape()[idx_batch]);
+ ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape()[idx_channel] % (block_shape * block_shape) != 0);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != output->tensor_shape().total_size());
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+} // namespace
+
+NESpaceToDepthLayerKernel::NESpaceToDepthLayerKernel()
+ : _input(nullptr), _output(nullptr), _block_shape()
+{
+}
+
+void NESpaceToDepthLayerKernel::configure(const ITensor *input, ITensor *output, int32_t block_shape)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+ TensorShape output_shape = misc::shape_calculator::compute_space_to_depth_shape(input->info(), block_shape);
+ auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type());
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), block_shape));
+
+ _input = input;
+ _block_shape = block_shape;
+ _output = output;
+
+ // Configure kernel window
+ Window win = calculate_max_window(*output->info(), Steps());
+ INEKernel::configure(win);
+}
+
+Status NESpaceToDepthLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, block_shape));
+ return Status{};
+}
+
+void NESpaceToDepthLayerKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
+
+ const DataLayout data_layout = _input->info()->data_layout();
+ const int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const int element_size = _input->info()->element_size();
+
+ const size_t channel_size = _input->info()->dimension(channel_idx);
+
+ Window slice_out = window.first_slice_window_3D();
+
+ int batch_id = 0;
+
+ // Main loop for NCHW and NHWC
+ if(_output->info()->data_layout() == DataLayout::NCHW)
+ {
+ do
+ {
+ Iterator out(_output, slice_out);
+ execute_window_loop(slice_out, [&](const Coordinates & id)
+ {
+ const size_t channel_id = id.z();
+ const size_t in_x = id.x() * _block_shape + (channel_id / channel_size) % _block_shape;
+ const size_t in_y = id.y() * _block_shape + (channel_id / channel_size) / _block_shape;
+ const int z = channel_id % channel_size;
+ Coordinates input_coords{ in_x, in_y, z, batch_id };
+ memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size);
+ },
+ out);
+ ++batch_id;
+ }
+ while(window.slide_window_slice_3D(slice_out));
+ }
+ else
+ {
+ do
+ {
+ Iterator out(_output, slice_out);
+ execute_window_loop(slice_out, [&](const Coordinates & id)
+ {
+ const size_t channel_id = id.x();
+ const size_t in_x = id.y() * _block_shape + (channel_id / channel_size) % _block_shape;
+ const size_t in_y = id.z() * _block_shape + (channel_id / channel_size) / _block_shape;
+ const int z = channel_id % channel_size;
+ Coordinates input_coords{ z, in_x, in_y, batch_id };
+ memcpy(out.ptr(), _input->ptr_to_element(input_coords), element_size);
+ },
+ out);
+ ++batch_id;
+ }
+ while(window.slide_window_slice_3D(slice_out));
+ }
+}
+} // namespace arm_compute
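The index arithmetic in run() decomposes each output channel into an input channel plus a spatial shift inside the block. The same mapping as a standalone scalar sketch (NCHW ordering, hypothetical helper types):

    #include <cstddef>

    // Output-to-input coordinate mapping used by the kernel (NCHW, illustrative).
    struct Coord { std::size_t x, y, z; };

    Coord space_to_depth_src_coord(Coord out, std::size_t block, std::size_t in_channels)
    {
        const std::size_t shift   = out.z / in_channels; // block cell this output channel reads from
        const std::size_t shift_w = shift % block;
        const std::size_t shift_h = shift / block;
        return Coord{ out.x * block + shift_w,  // input x
                      out.y * block + shift_h,  // input y
                      out.z % in_channels };    // input channel
    }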
diff --git a/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp b/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp
new file mode 100644
index 0000000000..18d82918c7
--- /dev/null
+++ b/src/runtime/NEON/functions/NESpaceToDepthLayer.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
+NESpaceToDepthLayer::NESpaceToDepthLayer()
+ : _space_to_depth_kernel()
+{
+}
+
+void NESpaceToDepthLayer::configure(const ITensor *input, ITensor *output, int32_t block_shape)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ _space_to_depth_kernel.configure(input, output, block_shape);
+}
+
+Status NESpaceToDepthLayer::validate(const ITensorInfo *input, const ITensorInfo *output, int32_t block_shape)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(NESpaceToDepthLayerKernel::validate(input, output, block_shape));
+ return Status{};
+}
+
+void NESpaceToDepthLayer::run()
+{
+ NEScheduler::get().schedule(&_space_to_depth_kernel, Window::DimY);
+}
+} // namespace arm_compute
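Because the function is a thin wrapper around the kernel, callers can query validity before allocating anything. A hedged sketch with illustrative shapes consistent with block_shape = 2:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h"

    using namespace arm_compute;

    bool can_run_space_to_depth()
    {
        // Illustrative F32 infos: (8, 8, 3, 1) -> (4, 4, 12, 1) with block_shape = 2
        const TensorInfo input(TensorShape(8U, 8U, 3U, 1U), 1, DataType::F32);
        const TensorInfo output(TensorShape(4U, 4U, 12U, 1U), 1, DataType::F32);
        return bool(NESpaceToDepthLayer::validate(&input, &output, 2)); // true if valid
    }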
diff --git a/tests/datasets/SpaceToDepthDataset.h b/tests/datasets/SpaceToDepthDataset.h
new file mode 100644
index 0000000000..563a475d79
--- /dev/null
+++ b/tests/datasets/SpaceToDepthDataset.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_DATASET
+#define ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_DATASET
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class SpaceToDepthLayerDataset
+{
+public:
+ using type = std::tuple<TensorShape, TensorShape, int32_t>;
+
+ struct iterator
+ {
+ iterator(std::vector<TensorShape>::const_iterator src_it,
+ std::vector<TensorShape>::const_iterator dst_it,
+ std::vector<int32_t>::const_iterator block_shape_it)
+ : _src_it{ std::move(src_it) },
+ _dst_it{ std::move(dst_it) },
+ _block_shape_it{ std::move(block_shape_it) }
+ {
+ }
+
+ std::string description() const
+ {
+ std::stringstream description;
+ description << "In=" << *_src_it << ":";
+ description << "Out=" << *_dst_it;
+ description << "BlockShape=" << *_block_shape_it << ":";
+ return description.str();
+ }
+
+ SpaceToDepthLayerDataset::type operator*() const
+ {
+ return std::make_tuple(*_src_it, *_dst_it, *_block_shape_it);
+ }
+
+ iterator &operator++()
+ {
+ ++_src_it;
+ ++_dst_it;
+ ++_block_shape_it;
+
+ return *this;
+ }
+
+ private:
+ std::vector<TensorShape>::const_iterator _src_it;
+ std::vector<TensorShape>::const_iterator _dst_it;
+ std::vector<int32_t>::const_iterator _block_shape_it;
+ };
+
+ iterator begin() const
+ {
+ return iterator(_src_shapes.begin(), _dst_shapes.begin(), _block_shape.begin());
+ }
+
+ int size() const
+ {
+ return std::min(_src_shapes.size(), std::min(_dst_shapes.size(), _block_shape.size()));
+ }
+
+ void add_config(TensorShape src, TensorShape dst, int32_t block_shape)
+ {
+ _src_shapes.emplace_back(std::move(src));
+ _dst_shapes.emplace_back(std::move(dst));
+ _block_shape.emplace_back(std::move(block_shape));
+ }
+
+protected:
+ SpaceToDepthLayerDataset() = default;
+ SpaceToDepthLayerDataset(SpaceToDepthLayerDataset &&) = default;
+
+private:
+ std::vector<TensorShape> _src_shapes{};
+ std::vector<TensorShape> _dst_shapes{};
+ std::vector<int32_t> _block_shape{};
+};
+
+class SmallSpaceToDepthLayerDataset final : public SpaceToDepthLayerDataset
+{
+public:
+ SmallSpaceToDepthLayerDataset()
+ {
+ add_config(TensorShape(2U, 2U, 1U, 1U), TensorShape(1U, 1U, 4U, 1U), 2);
+ add_config(TensorShape(6U, 2U, 1U, 1U), TensorShape(3U, 1U, 4U, 1U), 2);
+ add_config(TensorShape(2U, 4U, 2U, 1U), TensorShape(1U, 2U, 8U, 1U), 2);
+ add_config(TensorShape(2U, 6U, 1U, 2U), TensorShape(1U, 3U, 4U, 2U), 2);
+ add_config(TensorShape(6U, 8U, 1U, 1U), TensorShape(3U, 4U, 4U, 1U), 2);
+ add_config(TensorShape(6U, 8U, 15U, 5U), TensorShape(3U, 4U, 60U, 5U), 2);
+ }
+};
+class LargeSpaceToDepthLayerDataset final : public SpaceToDepthLayerDataset
+{
+public:
+ LargeSpaceToDepthLayerDataset()
+ {
+ add_config(TensorShape(128U, 64U, 2U, 1U), TensorShape(64U, 32U, 8U, 1U), 2);
+ add_config(TensorShape(512U, 64U, 2U, 1U), TensorShape(256U, 32U, 8U, 1U), 2);
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_DATASET */
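Each entry can be sanity-checked against the shape rule: for the last small config, 6/2 = 3, 8/2 = 4 and 15 * 2 * 2 = 60, with the batch of 5 untouched. As an illustrative compile-time check:

    // Illustrative compile-time check of one dataset entry against the rule
    // (mirrors add_config(TensorShape(6U, 8U, 15U, 5U), TensorShape(3U, 4U, 60U, 5U), 2)):
    static_assert(6 / 2 == 3 && 8 / 2 == 4 && 15 * 2 * 2 == 60, "consistent with block_shape = 2");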
diff --git a/tests/validation/NEON/SpaceToDepthLayer.cpp b/tests/validation/NEON/SpaceToDepthLayer.cpp
new file mode 100644
index 0000000000..8d2726168c
--- /dev/null
+++ b/tests/validation/NEON/SpaceToDepthLayer.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NESpaceToDepthLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/SpaceToDepthDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/SpaceToDepthFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(SpaceToDepthLayer)
+
+template <typename T>
+using NESpaceToDepthLayerFixture = SpaceToDepthLayerValidationFixture<Tensor, Accessor, NESpaceToDepthLayer, T>;
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(ValidateStatic, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32), // Mismatching data types
+ TensorInfo(TensorShape(32U, 16U, 2U, 1U), 1, DataType::F32), // Negative block shapes
+ TensorInfo(TensorShape(32U, 16U, 2U, 1U, 4U), 1, DataType::F32), // Wrong tensor shape
+ }),
+ framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(16U, 8U, 8U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 8U, 8U, 1U), 1, DataType::F16),
+ TensorInfo(TensorShape(32U, 8U, 8U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(32U, 8U, 8U, 1U), 1, DataType::F32),
+ })),
+ framework::dataset::make("BlockShape", { 2, 2, -2, 2 })),
+ framework::dataset::make("Expected", { true, false, false, false})),
+ input_info, output_info, block_shape, expected)
+{
+ bool is_valid = bool(NESpaceToDepthLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), block_shape));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToDepthLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToDepthLayerDataset(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToDepthLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToDepthLayerDataset(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(Small, NESpaceToDepthLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallSpaceToDepthLayerDataset(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(Large, NESpaceToDepthLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeSpaceToDepthLayerDataset(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Float
+
+TEST_SUITE_END() // SpaceToDepthLayer
+TEST_SUITE_END() // NEON
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/SpaceToDepthFixture.h b/tests/validation/fixtures/SpaceToDepthFixture.h
new file mode 100644
index 0000000000..170fdfa397
--- /dev/null
+++ b/tests/validation/fixtures/SpaceToDepthFixture.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE
+
+#include "tests/Globals.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/SpaceToDepth.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SpaceToDepthLayerValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape input_shape, TensorShape output_shape, const int block_shape, DataType data_type, DataLayout data_layout)
+ {
+ _target = compute_target(input_shape, output_shape, block_shape, data_type, data_layout);
+ _reference = compute_reference(input_shape, output_shape, block_shape, data_type);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+ library->fill(tensor, distribution, i);
+ }
+ TensorType compute_target(TensorShape input_shape, TensorShape output_shape, const int block_shape,
+ DataType data_type, DataLayout data_layout)
+ {
+ if(data_layout == DataLayout::NHWC)
+ {
+ permute(input_shape, PermutationVector(2U, 0U, 1U));
+ permute(output_shape, PermutationVector(2U, 0U, 1U));
+ }
+
+ // Create tensors
+ TensorType input = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
+ TensorType output = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);
+
+ // Create and configure function
+ FunctionType space_to_depth;
+ space_to_depth.configure(&input, &output, block_shape);
+
+ ARM_COMPUTE_EXPECT(input.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ input.allocator()->allocate();
+ output.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!input.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!output.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(input), 0);
+ // Compute function
+ space_to_depth.run();
+
+ return output;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape,
+ const int block_shape, DataType data_type)
+ {
+ // Create reference
+ SimpleTensor<T> input{ input_shape, data_type };
+
+ // Fill reference
+ fill(input, 0);
+
+ // Compute reference
+ return reference::space_to_depth(input, output_shape, block_shape);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_FIXTURE */
diff --git a/tests/validation/reference/SpaceToDepth.cpp b/tests/validation/reference/SpaceToDepth.cpp
new file mode 100644
index 0000000000..bd8e37a8bf
--- /dev/null
+++ b/tests/validation/reference/SpaceToDepth.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "SpaceToDepth.h"
+
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+// Space to Batch
+template <typename T>
+SimpleTensor<T> space_to_depth(const SimpleTensor<T> &src, const TensorShape &dst_shape, const int block_shape)
+{
+ SimpleTensor<T> result(dst_shape, src.data_type());
+
+ const auto width_out = static_cast<int>(dst_shape[0]);
+ const auto height_out = static_cast<int>(dst_shape[1]);
+ const auto channel_out = static_cast<int>(dst_shape[2]);
+
+ const auto width_in = static_cast<int>(src.shape()[0]);
+ const auto height_in = static_cast<int>(src.shape()[1]);
+ const auto channel_in = static_cast<int>(src.shape()[2]);
+
+ const auto batch = static_cast<int>(src.shape()[3]);
+
+ const auto block_width = block_shape;
+ const auto block_height = block_shape;
+
+ int out_pos = 0;
+ for(int ba = 0; ba < batch; ++ba)
+ {
+ for(int outC = 0; outC < channel_out; ++outC)
+ {
+ const int inC = outC % channel_in;
+
+ const int shift_w = (outC / channel_in) % block_width;
+ const int shift_h = (outC / channel_in) / block_width;
+
+ for(int outH = 0; outH < height_out; ++outH)
+ {
+ for(int outW = 0; outW < width_out; ++outW)
+ {
+ const auto in_pos = ((ba * channel_in + inC) * height_in + ((outH * block_height + shift_h))) * width_in + (outW * block_width + shift_w);
+ result[out_pos] = src[in_pos];
+ ++out_pos;
+ }
+ }
+ }
+ }
+ return result;
+}
+
+template SimpleTensor<float> space_to_depth(const SimpleTensor<float> &src, const TensorShape &dst_shape, const int block_shape);
+template SimpleTensor<half> space_to_depth(const SimpleTensor<half> &src, const TensorShape &dst_shape, const int block_shape);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
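On the smallest dataset entry the reference collapses to a plain gather. A standalone rendering of the same loop nest for the 2x2x1 -> 1x1x4 case with block_shape = 2 (illustrative, mirrors the indexing above):

    #include <array>
    #include <cassert>

    int main()
    {
        // Row-major 2x2 single-channel input, block_shape = 2
        const std::array<float, 4> src = { 0.f, 1.f, 2.f, 3.f };
        std::array<float, 4> dst{};

        const int block = 2, channel_in = 1, width_in = 2;
        int out_pos = 0;
        for(int out_c = 0; out_c < 4; ++out_c) // width_out == height_out == 1
        {
            const int shift_w = (out_c / channel_in) % block;
            const int shift_h = (out_c / channel_in) / block;
            dst[out_pos++] = src[shift_h * width_in + shift_w];
        }
        // Each 2x2 spatial block becomes 4 consecutive channels
        assert(dst[0] == 0.f && dst[1] == 1.f && dst[2] == 2.f && dst[3] == 3.f);
        return 0;
    }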
diff --git a/tests/validation/reference/SpaceToDepth.h b/tests/validation/reference/SpaceToDepth.h
new file mode 100644
index 0000000000..885c6153eb
--- /dev/null
+++ b/tests/validation/reference/SpaceToDepth.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_H__
+#define __ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> space_to_depth(const SimpleTensor<T> &src, const TensorShape &dst_shape, const int block_shape);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_SPACE_TO_DEPTH_LAYER_H__ */