author     Gian Marco Iodice <gianmarco.iodice@arm.com>  2018-11-27 15:58:08 +0000
committer  Gian Marco Iodice <gianmarco.iodice@arm.com>  2018-12-05 10:51:21 +0000
commit     8aa985e6cd553f4e2cee6cab74b82fa626896299 (patch)
tree       48fda6fb70698b497b45ec775a04147ce0c5c379
parent     8fe103c35b351f2f2028782c74f0b619a744595e (diff)
download   ComputeLibrary-8aa985e6cd553f4e2cee6cab74b82fa626896299.tar.gz
COMPMID-1725: Implement Pack
Change-Id: I13f6e4c600f39355f69e015409bf30dafdc5e3aa
Reviewed-on: https://review.mlplatform.org/332
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
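For context, a minimal usage sketch of the new CLStackLayer function this patch adds (tensor names and shapes are hypothetical; the configure()/run() calls follow the declarations in CLStackLayer.h below):

    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLStackLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Three 2D inputs with identical shape and data type (hypothetical sizes)
        CLTensor t0, t1, t2, dst;
        const TensorInfo info(TensorShape(32U, 16U), 1, DataType::F32);
        t0.allocator()->init(info);
        t1.allocator()->init(info);
        t2.allocator()->init(info);

        // Stack along axis 0; negative axis values wrap around.
        // The rank-3 output shape (3, 32, 16) is inferred automatically.
        CLStackLayer stack;
        stack.configure({ &t0, &t1, &t2 }, 0, &dst);

        t0.allocator()->allocate();
        t1.allocator()->allocate();
        t2.allocator()->allocate();
        dst.allocator()->allocate();

        stack.run();                // enqueue one stack_layer kernel per input
        CLScheduler::get().sync();  // wait for the queue to drain
        return 0;
    }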
-rw-r--r--  arm_compute/core/CL/CLKernels.h                   |    1
-rw-r--r--  arm_compute/core/CL/kernels/CLStackLayerKernel.h  |   83
-rw-r--r--  arm_compute/core/Helpers.h                        |   12
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h     |   22
-rw-r--r--  arm_compute/runtime/CL/CLFunctions.h              |    1
-rw-r--r--  arm_compute/runtime/CL/functions/CLStackLayer.h   |   77
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                   |    5
-rw-r--r--  src/core/CL/cl_kernels/stack_layer.cl             |  113
-rw-r--r--  src/core/CL/kernels/CLStackLayerKernel.cpp        |  145
-rw-r--r--  src/runtime/CL/functions/CLStackLayer.cpp         |   85
-rw-r--r--  tests/validation/CL/StackLayer.cpp                |  405
-rw-r--r--  tests/validation/fixtures/StackLayerFixture.h     |  138
-rw-r--r--  tests/validation/reference/StackLayer.cpp         |  125
-rw-r--r--  tests/validation/reference/StackLayer.h           |   44
14 files changed, 1256 insertions(+), 0 deletions(-)
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index c707265c23..0fe1e04ccd 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -121,6 +121,7 @@
#include "arm_compute/core/CL/kernels/CLSobel7x7Kernel.h"
#include "arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLSpaceToBatchLayerKernel.h"
+#include "arm_compute/core/CL/kernels/CLStackLayerKernel.h"
#include "arm_compute/core/CL/kernels/CLStridedSliceKernel.h"
#include "arm_compute/core/CL/kernels/CLTableLookupKernel.h"
#include "arm_compute/core/CL/kernels/CLThresholdKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLStackLayerKernel.h b/arm_compute/core/CL/kernels/CLStackLayerKernel.h
new file mode 100644
index 0000000000..4d377daf8b
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLStackLayerKernel.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __ARM_COMPUTE_CLSTACKLAYERKERNEL_H__
+#define __ARM_COMPUTE_CLSTACKLAYERKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** OpenCL kernel to stack a rank-R tensor into one with rank-(R+1) along the axis dimension. */
+class CLStackLayerKernel : public ICLKernel
+{
+public:
+ /** Default constructor */
+ CLStackLayerKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLStackLayerKernel(const CLStackLayerKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLStackLayerKernel &operator=(const CLStackLayerKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CLStackLayerKernel(CLStackLayerKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CLStackLayerKernel &operator=(CLStackLayerKernel &&) = default;
+ /** Default destructor */
+ ~CLStackLayerKernel() = default;
+ /** Initialise the kernel's inputs and output
+ *
+ * @param[in] input Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] axis The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+ * @param[in] idx_input Index of the input tensor in the list of tensors to stack.
+ * All tensors in the list must have the same shape
+ * @param[in] num_tensors Number of tensors to stack
+ * @param[out] output Output tensor. Data types supported: Same as @p input.
+ *
+ */
+ void configure(const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLStackLayerKernel
+ *
+ * @param[in] input Input tensor info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] axis The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+ * @param[in] idx_input Index of the input tensor in the list of tensors to stack
+ * All tensors in the list must have the same shape
+ * @param[in] num_tensors Number of tensors to stack
+ * @param[in] output Output tensor info. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ const ICLTensor *_input;
+ ICLTensor *_output;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLSTACKLAYERKERNEL_H__ */
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index ef59323073..8f4220fb80 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -710,6 +710,18 @@ inline Size2D compute_winograd_convolution_tiles(const Size2D &in_dims, const Si
return Size2D(num_tiles_x, num_tiles_y);
}
+/** Wrap a number around so that it lies within the range 0 <= x < m
+ *
+ * @param[in] x Input value
+ * @param[in] m Range
+ *
+ * @return the wrapped-around number
+ */
+template <typename T>
+inline T wrap_around(T x, T m)
+{
+ return x >= 0 ? x % m : (x % m + m) % m;
+}
} // namespace arm_compute
#include "arm_compute/core/Helpers.inl"
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 38906dfc9b..c625a07a7f 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -754,6 +754,28 @@ inline TensorShape calculate_width_concatenate_shape(const std::vector<T *> &inp
return out_shape;
}
+
+/** Calculate the output shape of the stack operation. A new dimension of size num_tensors is
+ *  inserted at position axis; the remaining input dimensions are shifted up by one slot.
+ *
+ * @param[in] a           Input tensor info
+ * @param[in] axis        Axis along which the tensors are stacked. Must already be wrapped to a non-negative value
+ * @param[in] num_tensors Number of tensors to stack
+ *
+ * @return the calculated shape
+ */
+inline TensorShape compute_stack_shape(const ITensorInfo &a, unsigned int axis, unsigned int num_tensors)
+{
+ ARM_COMPUTE_ERROR_ON(axis > a.num_dimensions());
+ ARM_COMPUTE_ERROR_ON(a.num_dimensions() > 4);
+
+ TensorShape shape_out{ a.tensor_shape() };
+ shape_out.set(axis, num_tensors);
+
+ unsigned int i_shift = 0;
+
+ for(unsigned int i = 0; i < a.num_dimensions(); ++i)
+ {
+ if(i == axis)
+ {
+ i_shift++;
+ }
+
+ shape_out.set(i + i_shift, a.tensor_shape()[i]);
+ }
+ return shape_out;
+}
} // namespace shape_calculator
} // namespace misc
} // namespace arm_compute
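To make the index shifting in compute_stack_shape concrete, here is a worked trace with an illustrative shape:

    // Input: a.tensor_shape() = (32, 16, 8), axis = 1, num_tensors = 4
    //
    // shape_out starts as (32, 16, 8); set(1, 4)   -> (32, 4, 8)
    // i = 0: copy input dim 0 to slot 0            -> (32, 4, 8)
    // i = 1: i == axis, i_shift becomes 1;
    //        copy input dim 1 to slot 2            -> (32, 4, 16)
    // i = 2: copy input dim 2 to slot 3            -> (32, 4, 16, 8)
    //
    // Result: rank-4 shape (32, 4, 16, 8) from rank-3 inputs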
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index e68e719a13..d340ce1e0c 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -125,6 +125,7 @@
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
#include "arm_compute/runtime/CL/functions/CLSpaceToBatchLayer.h"
#include "arm_compute/runtime/CL/functions/CLSplit.h"
+#include "arm_compute/runtime/CL/functions/CLStackLayer.h"
#include "arm_compute/runtime/CL/functions/CLStridedSlice.h"
#include "arm_compute/runtime/CL/functions/CLTableLookup.h"
#include "arm_compute/runtime/CL/functions/CLThreshold.h"
diff --git a/arm_compute/runtime/CL/functions/CLStackLayer.h b/arm_compute/runtime/CL/functions/CLStackLayer.h
new file mode 100644
index 0000000000..9794014889
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLStackLayer.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLSTACKLAYER_H__
+#define __ARM_COMPUTE_CLSTACKLAYER_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include "arm_compute/core/CL/kernels/CLStackLayerKernel.h"
+
+#include <memory>
+#include <vector>
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Basic function to stack tensors along an axis. This function calls the following kernel:
+ *
+ * -# @ref CLStackLayerKernel
+ *
+ */
+class CLStackLayer : public IFunction
+{
+public:
+ /** Default constructor */
+ CLStackLayer();
+ /** Initialise the kernel's inputs vector and output.
+ *
+ * @param[in] input The vector containing all the tensors to stack. All tensors must have the same shape. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] axis The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+ * Negative values wrap around
+ * @param[out] output Output tensor. Data types supported: Same as @p input.
+ */
+ void configure(const std::vector<ICLTensor *> &input, int axis, ICLTensor *output);
+ /** Static function to check if given info will lead to a valid configuration of @ref CLStackLayer
+ *
+ * @param[in] input The vector containing all the tensor info of the tensors to stack. All tensors must have the same shape. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] axis The dimension to stack the tensors along. It must be smaller than the number of input dimensions.
+ * Negative values wrap around
+ * @param[in] output Output tensor info. Data types supported: Same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const std::vector<ITensorInfo *> &input, int axis, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ std::vector<ICLTensor *> _input;
+ std::unique_ptr<CLStackLayerKernel[]> _stack_kernels;
+ unsigned int _num_inputs;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLSTACKLAYER_H__ */
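As elsewhere in the library, validate() can serve as a pre-flight check before any allocation happens. A minimal sketch (the TensorInfo values are assumptions for illustration only):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/CL/functions/CLStackLayer.h"

    #include <vector>

    using namespace arm_compute;

    bool can_stack()
    {
        // All inputs must share one shape and data type
        TensorInfo in_info(TensorShape(32U, 16U), 1, DataType::F32);
        TensorInfo out_info{}; // empty: the output shape is only checked once set

        std::vector<ITensorInfo *> infos = { &in_info, &in_info, &in_info };
        const Status s = CLStackLayer::validate(infos, /* axis */ -1, &out_info);
        return s.error_code() == ErrorCode::OK;
    }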
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index ac1d4b349e..e48ff03e05 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -423,6 +423,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "space_to_batch_nhwc", "space_to_batch.cl" },
{ "space_to_batch_static_nhwc", "space_to_batch.cl" },
{ "softmax_layer_max_shift_exp_sum_parallel", "softmax_layer.cl" },
+ { "stack_layer", "stack_layer.cl" },
{ "strided_slice", "slice_ops.cl" },
{ "suppress_non_maximum", "canny.cl" },
{ "tablelookup_U8", "tablelookup.cl" },
@@ -865,6 +866,10 @@ const std::map<std::string, std::string> CLKernelLibrary::_program_source_map =
#include "./cl_kernels/space_to_batch.clembed"
},
{
+ "stack_layer.cl",
+#include "./cl_kernels/stack_layer.clembed"
+ },
+ {
"tablelookup.cl",
#include "./cl_kernels/tablelookup.clembed"
},
diff --git a/src/core/CL/cl_kernels/stack_layer.cl b/src/core/CL/cl_kernels/stack_layer.cl
new file mode 100644
index 0000000000..bed62662ad
--- /dev/null
+++ b/src/core/CL/cl_kernels/stack_layer.cl
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(DATA_TYPE) && defined(AXIS) && defined(SRC_DIM2) && defined(DST_DIM3)
+
+#if AXIS == 0
+#define X_DST (idx_input)
+#define Y_DST (x_src)
+#define Z_DST (y_src)
+#define W_DST (z_src)
+#define K_DST (w_src)
+#elif AXIS == 1 // AXIS == 1
+#define X_DST (x_src)
+#define Y_DST (idx_input)
+#define Z_DST (y_src)
+#define W_DST (z_src)
+#define K_DST (w_src)
+#elif AXIS == 2 // AXIS == 2
+#define X_DST (x_src)
+#define Y_DST (y_src)
+#define Z_DST (idx_input)
+#define W_DST (z_src)
+#define K_DST (w_src)
+#elif AXIS == 3 // AXIS == 3
+#define X_DST (x_src)
+#define Y_DST (y_src)
+#define Z_DST (z_src)
+#define W_DST (idx_input)
+#define K_DST (w_src)
+#elif AXIS == 4 // AXIS == 4
+#define X_DST (x_src)
+#define Y_DST (y_src)
+#define Z_DST (z_src)
+#define W_DST (w_src)
+#define K_DST (idx_input)
+#else // AXIS not supported
+#error "Not supported axis"
+#endif // AXIS == 0
+
+/** OpenCL kernel to stack a rank-R tensor into one with rank-(R+1) along the axis dimension
+ *
+ * @note The data type has to be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
+ * @note The dimension to stack the tensors along has to be passed at compile time using -DAXIS (e.g. -DAXIS=1)
+ * @note Dimension 2 of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=112)
+ * @note Dimension 3 of the output tensor must be passed at compile time using -DDST_DIM3 (e.g. -DDST_DIM3=112)
+ *
+ * @param[in] src_ptr Pointer to the source tensor. Supported data types: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
+ * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr
+ * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes)
+ * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] idx_input Index of the input tensor in the list of tensors to stack
+ */
+__kernel void stack_layer(
+ TENSOR4D_DECLARATION(src),
+ TENSOR4D_DECLARATION(dst),
+ unsigned int idx_input)
+{
+ uint x_src = get_global_id(0);
+ uint y_src = get_global_id(1);
+ uint z_src = (get_global_id(2) % SRC_DIM2);
+ uint w_src = (get_global_id(2) / SRC_DIM2);
+
+ __global DATA_TYPE *src = (__global DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + x_src * sizeof(DATA_TYPE) + y_src * src_stride_y + z_src * src_stride_z + w_src * src_stride_w);
+
+ __global DATA_TYPE *dst = (__global DATA_TYPE *)(dst_ptr + dst_offset_first_element_in_bytes + X_DST * sizeof(DATA_TYPE) + Y_DST * dst_stride_y + Z_DST * dst_stride_z + W_DST * dst_stride_w + K_DST *
+ dst_stride_w * (uint)DST_DIM3);
+
+ *dst = *src;
+}
+
+#undef X_DST
+#undef Y_DST
+#undef Z_DST
+#undef W_DST
+#undef K_DST
+#endif // defined(DATA_TYPE) && defined(AXIS) && defined(SRC_DIM2) && defined(DST_DIM3)
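The one subtle line in the kernel is the destination address: the output may be rank-5, but TENSOR4D_DECLARATION passes only four strides, so the fifth coordinate K_DST is folded into the W stride as K_DST * dst_stride_w * DST_DIM3 (one step in K spans DST_DIM3 whole W-slices). A host-side restatement of the same arithmetic, purely for illustration:

    #include <cstddef>

    // Byte offset of element (x, y, z, w, k) when only four strides exist
    // and the fifth dimension is folded into the fourth
    std::size_t dst_offset_bytes(std::size_t x, std::size_t y, std::size_t z,
                                 std::size_t w, std::size_t k, std::size_t elem_size,
                                 std::size_t stride_y, std::size_t stride_z,
                                 std::size_t stride_w, std::size_t dst_dim3)
    {
        return x * elem_size + y * stride_y + z * stride_z + w * stride_w
               + k * stride_w * dst_dim3; // k steps over whole 4D sub-tensors
    }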
diff --git a/src/core/CL/kernels/CLStackLayerKernel.cpp b/src/core/CL/kernels/CLStackLayerKernel.cpp
new file mode 100644
index 0000000000..bac8992f7b
--- /dev/null
+++ b/src/core/CL/kernels/CLStackLayerKernel.cpp
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLStackLayerKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+using namespace arm_compute::misc::shape_calculator;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::U8, DataType::S8,
+ DataType::U16, DataType::S16, DataType::U32, DataType::S32,
+ DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON(idx_input >= num_tensors);
+ ARM_COMPUTE_RETURN_ERROR_ON(axis > input->num_dimensions());
+ ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 4);
+
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), compute_stack_shape(*input, axis, num_tensors));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, unsigned int axis, unsigned int num_tensors, ITensorInfo *output)
+{
+ // Auto-initialize the output if it is not yet initialized
+ auto_init_if_empty(*output, input->clone()->set_tensor_shape(compute_stack_shape(*input, axis, num_tensors)));
+
+ // Configure kernel window
+ constexpr unsigned int num_elems_processed_per_iteration = 1;
+
+ // The window needs to be based on the input, as we copy every element of the input
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+
+ bool window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, ValidRegion(Coordinates(), output->tensor_shape()));
+
+ Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+ return std::make_pair(err, win);
+}
+} // namespace
+
+CLStackLayerKernel::CLStackLayerKernel()
+ : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLStackLayerKernel::configure(const ICLTensor *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, ICLTensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), axis, idx_input, num_tensors, output->info()));
+
+ _input = input;
+ _output = output;
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(input->info(), axis, num_tensors, output->info());
+
+ // Add build options
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_underlying_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DAXIS=" + support::cpp11::to_string(axis));
+ build_opts.add_option("-DSRC_DIM2=" + support::cpp11::to_string(input->info()->dimension(2)));
+ build_opts.add_option("-DDST_DIM3=" + support::cpp11::to_string(output->info()->dimension(3)));
+
+ // Create kernel
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("stack_layer", build_opts.options()));
+
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
+
+ const unsigned int idx = 2 * num_arguments_per_4D_tensor();
+ _kernel.setArg<cl_uint>(idx, idx_input);
+}
+
+Status CLStackLayerKernel::validate(const ITensorInfo *input, unsigned int axis, unsigned int idx_input, unsigned int num_tensors, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, axis, idx_input, num_tensors, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), axis, num_tensors, output->clone().get()).first);
+ return Status{};
+}
+
+void CLStackLayerKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+ Window window_out;
+ window_out.use_tensor_dimensions(_output->info()->tensor_shape());
+
+ Window collapsed = window.collapse(ICLKernel::window(), Window::DimZ);
+
+ Window slice_in = collapsed.first_slice_window_4D();
+ Window slice_out = window_out.first_slice_window_4D();
+
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, slice_in);
+ add_4D_tensor_argument(idx, _output, slice_out);
+ enqueue(queue, *this, slice_in);
+}
diff --git a/src/runtime/CL/functions/CLStackLayer.cpp b/src/runtime/CL/functions/CLStackLayer.cpp
new file mode 100644
index 0000000000..85adcad90c
--- /dev/null
+++ b/src/runtime/CL/functions/CLStackLayer.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/CL/functions/CLStackLayer.h"
+
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+CLStackLayer::CLStackLayer() // NOLINT
+ : _input(),
+ _stack_kernels(),
+ _num_inputs(0)
+{
+}
+
+void CLStackLayer::configure(const std::vector<ICLTensor *> &input, int axis, ICLTensor *output)
+{
+ _num_inputs = input.size();
+ _stack_kernels = arm_compute::support::cpp14::make_unique<CLStackLayerKernel[]>(_num_inputs);
+
+ // Wrap around negative values
+ const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->info()->num_dimensions() + 1));
+
+ for(unsigned int i = 0; i < _num_inputs; i++)
+ {
+ _stack_kernels[i].configure(input[i], axis_u, i, _num_inputs, output);
+ }
+}
+
+Status CLStackLayer::validate(const std::vector<ITensorInfo *> &input, int axis, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
+ ARM_COMPUTE_RETURN_ERROR_ON(input.size() < 2);
+
+ // Wrap around negative values
+ const unsigned int axis_u = wrap_around(axis, static_cast<int>(input[0]->num_dimensions() + 1));
+
+ const unsigned int num_inputs = input.size();
+
+ for(unsigned int i = 0; i < num_inputs; i++)
+ {
+ ARM_COMPUTE_RETURN_ON_ERROR(CLStackLayerKernel::validate(input[i], axis_u, i, num_inputs, output));
+ }
+
+ return Status{};
+}
+
+void CLStackLayer::run()
+{
+ for(unsigned i = 0; i < _num_inputs; i++)
+ {
+ CLScheduler::get().enqueue(_stack_kernels[i], false);
+ }
+}
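Note that run() passes false to CLScheduler::enqueue(), so none of the _num_inputs kernel launches blocks; the caller decides when to synchronize. A usage-side sketch (hedged, assuming a configured CLStackLayer named stack):

    stack.run();                // enqueues _num_inputs stack_layer kernels
    CLScheduler::get().sync();  // blocks until the command queue has drained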
diff --git a/tests/validation/CL/StackLayer.cpp b/tests/validation/CL/StackLayer.cpp
new file mode 100644
index 0000000000..089911272a
--- /dev/null
+++ b/tests/validation/CL/StackLayer.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLStackLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/StackLayerFixture.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+/** Data types */
+const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 });
+
+/** Numbers of tensors to test */
+const auto n_values = framework::dataset::make("NumTensors", { 3, 4 });
+
+/** Small 1D shapes to test */
+const auto shapes_1d_small = combine(datasets::Small1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Small 2D shapes to test */
+const auto shapes_2d_small = combine(datasets::Small2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Small 3D shapes to test */
+const auto shapes_3d_small = combine(datasets::Small3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Small 4D shapes to test */
+const auto shapes_4d_small = combine(datasets::Small4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Large 1D shapes to test */
+const auto shapes_1d_large = combine(datasets::Large1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Large 2D shapes to test */
+const auto shapes_2d_large = combine(datasets::Large2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Large 3D shapes to test */
+const auto shapes_3d_large = combine(datasets::Large3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Large 4D shapes to test */
+const auto shapes_4d_large = combine(datasets::Large4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Configuration test */
+void validate_configuration(TensorShape shape_in, int axis, DataType data_type, int num_tensors)
+{
+ // Wrap around negative values
+ const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_in.num_dimensions() + 1));
+
+ const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_in, 1, data_type), axis_u, num_tensors);
+
+ std::vector<CLTensor> tensors(num_tensors);
+ std::vector<ICLTensor*> src(num_tensors);
+
+ // Create vector of input tensors
+ for(int i = 0; i < num_tensors; ++i)
+ {
+ tensors[i] = create_tensor<CLTensor>(shape_in, data_type);
+ src[i] = &(tensors[i]);
+ ARM_COMPUTE_EXPECT(src[i]->info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ // Create tensors
+ CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLStackLayer stack;
+ stack.configure(src, axis, &dst);
+}
+} // namespace
+
+/** Fixture to use */
+template<typename T>
+using CLStackLayerFixture = StackLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLStackLayer, T>;
+
+using namespace arm_compute::misc::shape_calculator;
+
+TEST_SUITE(CL)
+TEST_SUITE(StackLayer)
+TEST_SUITE(Shapes1D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_1d_small,
+ data_types),
+ n_values),
+shape_in, axis, data_type, num_tensors)
+{
+ validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+ combine(combine(shapes_1d_small,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_1d_large,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(shapes_1d_small,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_1d_large,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(shapes_1d_small,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_1d_large,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes1D
+
+TEST_SUITE(Shapes2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_2d_small,
+ data_types),
+ n_values),
+shape_in, axis, data_type, num_tensors)
+{
+ validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+ combine(combine(shapes_2d_small,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_2d_large,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(shapes_2d_small,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_2d_large,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(shapes_2d_small,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_2d_large,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes2D
+
+TEST_SUITE(Shapes3D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_3d_small,
+ data_types),
+ n_values),
+shape_in, axis, data_type, num_tensors)
+{
+ validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+ combine(combine(shapes_3d_small,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_3d_large,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(shapes_3d_small,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_3d_large,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(shapes_3d_small,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_3d_large,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes3D
+
+TEST_SUITE(Shapes4D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_4d_small,
+ data_types),
+ n_values),
+shape_in, axis, data_type, num_tensors)
+{
+ validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+ combine(combine(shapes_4d_small,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_4d_large,
+ framework::dataset::make("DataType", { DataType::S32 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+ combine(combine(shapes_4d_small,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_4d_large,
+ framework::dataset::make("DataType", { DataType::S16 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+ combine(combine(shapes_4d_small,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+ combine(combine(shapes_4d_large,
+ framework::dataset::make("DataType", { DataType::S8 })),
+ n_values))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes4D
+TEST_SUITE_END() // StackLayer
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/StackLayerFixture.h b/tests/validation/fixtures/StackLayerFixture.h
new file mode 100644
index 0000000000..cab4350787
--- /dev/null
+++ b/tests/validation/fixtures/StackLayerFixture.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/StackLayer.h"
+#include "tests/validation/reference/Utils.h"
+
+#include <random>
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+template <typename TensorType, typename AbstractTensorType, typename AccessorType, typename FunctionType, typename T>
+class StackLayerValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+ {
+ _target = compute_target(shape_src, axis, data_type, num_tensors);
+ _reference = compute_reference(shape_src, axis, data_type, num_tensors);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, unsigned int i)
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+
+ TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+ {
+ std::vector<TensorType> tensors(num_tensors);
+ std::vector<AbstractTensorType *> src(num_tensors);
+
+ // Create vector of input tensors
+ for(int i = 0; i < num_tensors; ++i)
+ {
+ tensors[i] = create_tensor<TensorType>(shape_src, data_type);
+ src[i] = &(tensors[i]);
+ ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ // Create tensors
+ TensorType dst;
+
+ // The output tensor will be auto-initialized within the function
+
+ // Create and configure function
+ FunctionType stack;
+ stack.configure(src, axis, &dst);
+
+ // Allocate and fill the input tensors
+ for(int i = 0; i < num_tensors; ++i)
+ {
+ ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+ tensors[i].allocator()->allocate();
+ ARM_COMPUTE_EXPECT(!tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill input tensor
+ fill(AccessorType(tensors[i]), i);
+ }
+
+ // Allocate output tensor
+ dst.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Compute stack function
+ stack.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(const TensorShape &shape_src, int axis, DataType data_type, int num_tensors)
+ {
+ std::vector<SimpleTensor<T>> src;
+
+ for(int i = 0; i < num_tensors; ++i)
+ {
+ src.emplace_back(shape_src, data_type, 1);
+
+ fill(src[i], i);
+ }
+
+ // Wrap around negative values
+ const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_src.num_dimensions() + 1));
+
+ const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_src, 1, data_type), axis_u, num_tensors);
+
+ return reference::stack_layer<T>(src, shape_dst, data_type, axis_u);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE */
diff --git a/tests/validation/reference/StackLayer.cpp b/tests/validation/reference/StackLayer.cpp
new file mode 100644
index 0000000000..50e440c914
--- /dev/null
+++ b/tests/validation/reference/StackLayer.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "StackLayer.h"
+
+#include "arm_compute/core/Types.h"
+
+#include "tests/validation/Helpers.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> stack_layer(const std::vector<SimpleTensor<T>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis)
+{
+ ARM_COMPUTE_ERROR_ON(output_shape.num_dimensions() > 5);
+ ARM_COMPUTE_ERROR_ON(in.size() < 2);
+ ARM_COMPUTE_ERROR_ON(axis > in[0].shape().num_dimensions());
+
+ SimpleTensor<T> out{ output_shape, data_type };
+
+ const int width = in[0].shape()[0];
+ const int height = in[0].shape()[1];
+ const int depth = in[0].shape()[2];
+ const int batch_size = in[0].shape()[3];
+ const int num_tensors = in.size();
+
+ // Array to store the input coordinates
+ // i_coordinates[0] = xi, i_coordinates[1] = yi, i_coordinates[2] = zi
+ // i_coordinates[3] = bi, i_coordinates[4] = i, i_coordinates[5] = 0
+ // i_coordinates[5] will be always zero and used for not incrementing the output when the input has less than 4 dimensions
+ int i_coordinates[6] = { 0 };
+
+ // Array of pointers used to map the output coordinates to the input ones accordingly with the axis
+ // This array is initialized with &i_coordinates[5] since this will be always zero
+ int *o_coordinates[5] = { &i_coordinates[5], &i_coordinates[5], &i_coordinates[5], &i_coordinates[5], &i_coordinates[5] };
+
+ // Set the axis coordinate
+ o_coordinates[axis] = &i_coordinates[4];
+
+ unsigned int k_shift = 0;
+
+ // Map the output coordinates
+ for(unsigned int k = 0; k < in[0].shape().num_dimensions(); ++k)
+ {
+ if(k == axis)
+ {
+ k_shift++;
+ }
+
+ o_coordinates[k + k_shift] = &i_coordinates[k];
+ }
+
+ // Use alias for the input coordinates
+ int &xi = i_coordinates[0];
+ int &yi = i_coordinates[1];
+ int &zi = i_coordinates[2];
+ int &bi = i_coordinates[3];
+ int &i = i_coordinates[4];
+
+ // Use alias for the output coordinates
+ int &xo = *(o_coordinates[0]);
+ int &yo = *(o_coordinates[1]);
+ int &zo = *(o_coordinates[2]);
+ int &bo = *(o_coordinates[3]);
+ int &wo = *(o_coordinates[4]);
+
+ // Stack tensors
+ for(; i < num_tensors; ++(i))
+ {
+ bi = 0;
+ for(; bi < batch_size; ++(bi))
+ {
+ zi = 0;
+ for(; zi < depth; ++(zi))
+ {
+ yi = 0;
+ for(; yi < height; ++(yi))
+ {
+ xi = 0;
+ for(; xi < width; ++(xi))
+ {
+ *(reinterpret_cast<T *>(out(Coordinates(xo, yo, zo, bo, wo)))) = *(reinterpret_cast<const T *>(in[i](Coordinates(xi, yi, zi, bi))));
+ }
+ }
+ }
+ }
+ }
+
+ return out;
+}
+template SimpleTensor<int> stack_layer(const std::vector<SimpleTensor<int>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+template SimpleTensor<short> stack_layer(const std::vector<SimpleTensor<short>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+template SimpleTensor<char> stack_layer(const std::vector<SimpleTensor<char>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
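The o_coordinates table is the heart of this reference implementation: every output coordinate aliases either one of the input loop counters or the always-zero slot, so a single loop nest covers any stacking axis. A stripped-down, standalone illustration of the same aliasing idea (not library code):

    #include <cstdio>

    int main()
    {
        int xi = 0, i = 0, zero = 0; // spatial counter, tensor index, constant zero
        int *o[2] = { &zero, &zero };

        const int axis = 1;          // try axis = 0 to see the mapping flip
        o[axis]     = &i;            // the stacked dimension follows the tensor index
        o[1 - axis] = &xi;           // the surviving dimension follows x

        for(i = 0; i < 2; ++i)       // stack two 1D tensors of length 3
            for(xi = 0; xi < 3; ++xi)
                std::printf("in[%d](%d) -> out(%d, %d)\n", i, xi, *o[0], *o[1]);
        return 0;
    }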
diff --git a/tests/validation/reference/StackLayer.h b/tests/validation/reference/StackLayer.h
new file mode 100644
index 0000000000..453f176a9d
--- /dev/null
+++ b/tests/validation/reference/StackLayer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_STACK_LAYER_H__
+#define __ARM_COMPUTE_TEST_STACK_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> stack_layer(const std::vector<SimpleTensor<T>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_STACK_LAYER_H__ */