aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorGian Marco Iodice <gianmarco.iodice@arm.com>2018-11-27 15:58:08 +0000
committerGian Marco Iodice <gianmarco.iodice@arm.com>2018-12-05 10:51:21 +0000
commit8aa985e6cd553f4e2cee6cab74b82fa626896299 (patch)
tree48fda6fb70698b497b45ec775a04147ce0c5c379 /tests
parent8fe103c35b351f2f2028782c74f0b619a744595e (diff)
downloadComputeLibrary-8aa985e6cd553f4e2cee6cab74b82fa626896299.tar.gz
COMPMID-1725: Implement Pack
Change-Id: I13f6e4c600f39355f69e015409bf30dafdc5e3aa Reviewed-on: https://review.mlplatform.org/332 Tested-by: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--tests/validation/CL/StackLayer.cpp405
-rw-r--r--tests/validation/fixtures/StackLayerFixture.h138
-rw-r--r--tests/validation/reference/StackLayer.cpp125
-rw-r--r--tests/validation/reference/StackLayer.h44
4 files changed, 712 insertions, 0 deletions
diff --git a/tests/validation/CL/StackLayer.cpp b/tests/validation/CL/StackLayer.cpp
new file mode 100644
index 0000000000..089911272a
--- /dev/null
+++ b/tests/validation/CL/StackLayer.cpp
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLStackLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/StackLayerFixture.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// *INDENT-OFF*
+// clang-format off
+/** Data types to test in the Configuration smoke tests */
+const auto data_types = framework::dataset::make("DataType", { DataType::QASYMM8, DataType::F16, DataType::F32 });
+
+/** Num tensors values to test */
+const auto n_values = framework::dataset::make("NumTensors", { 3, 4 });
+
+// For each input rank R the axis dataset spans [-R, R] (presumably
+// make("Axis", start, end) generates the half-open range [start, end) --
+// TODO confirm); the negative values exercise the wrap-around path.
+
+/** Small shapes 1D to test */
+const auto shapes_1d_small = combine(datasets::Small1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Small shapes 2D to test */
+const auto shapes_2d_small = combine(datasets::Small2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Small shapes 3D to test */
+const auto shapes_3d_small = combine(datasets::Small3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Small shapes 4D to test */
+const auto shapes_4d_small = combine(datasets::Small4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Large shapes 1D to test */
+const auto shapes_1d_large = combine(datasets::Large1DShapes(), framework::dataset::make("Axis", -1, 2));
+
+/** Large shapes 2D to test */
+const auto shapes_2d_large = combine(datasets::Large2DShapes(), framework::dataset::make("Axis", -2, 3));
+
+/** Large shapes 3D to test */
+const auto shapes_3d_large = combine(datasets::Large3DShapes(), framework::dataset::make("Axis", -3, 4));
+
+/** Large shapes 4D to test */
+const auto shapes_4d_large = combine(datasets::Large4DShapes(), framework::dataset::make("Axis", -4, 5));
+
+/** Configuration test
+ *
+ * Creates @p num_tensors resizable input tensors of shape @p shape_in,
+ * computes the expected stacked output shape and checks that
+ * CLStackLayer::configure() accepts the tensors.
+ *
+ * @param shape_in    Shape of each input tensor
+ * @param axis        Stack axis (negative values are wrapped around below)
+ * @param data_type   Data type of the input/output tensors
+ * @param num_tensors Number of input tensors to stack
+ */
+void validate_configuration(TensorShape shape_in, int axis, DataType data_type, int num_tensors)
+{
+    // Wrap around negative values
+    const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_in.num_dimensions() + 1));
+
+    const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_in, 1, data_type), axis_u, num_tensors);
+
+    std::vector<CLTensor> tensors(num_tensors);
+    std::vector<ICLTensor*> src(num_tensors);
+
+    // Create vector of input tensors
+    for(int i = 0; i < num_tensors; ++i)
+    {
+        tensors[i] = create_tensor<CLTensor>(shape_in, data_type);
+        src[i] = &(tensors[i]);
+        ARM_COMPUTE_EXPECT(src[i]->info()->is_resizable(), framework::LogLevel::ERRORS);
+    }
+
+    // Create tensors
+    CLTensor dst = create_tensor<CLTensor>(shape_dst, data_type);
+
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLStackLayer stack;
+    stack.configure(src, axis, &dst);
+}
+} // namespace
+
+/** Fixture to use */
+template<typename T>
+using CLStackLayerFixture = StackLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLStackLayer, T>;
+
+using namespace arm_compute::misc::shape_calculator;
+
+// The suites below are organised by input rank (Shapes1D .. Shapes4D). Each
+// rank runs a Configuration smoke test over all data types, plus RunSmall
+// (DatasetMode::ALL) and RunLarge (DatasetMode::NIGHTLY) fixture tests per
+// data type. The fixture tests use S32/S16/S8 rather than F32/F16/QASYMM8 --
+// presumably because stacking only copies elements, so each bit width needs
+// validating once; confirm against the kernel implementation.
+TEST_SUITE(CL)
+TEST_SUITE(StackLayer)
+TEST_SUITE(Shapes1D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_1d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_1d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_1d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_1d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_1d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_1d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_1d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes1D
+
+TEST_SUITE(Shapes2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_2d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_2d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_2d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_2d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_2d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_2d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_2d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes2D
+
+TEST_SUITE(Shapes3D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_3d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_3d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_3d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_3d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_3d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_3d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_3d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes3D
+
+TEST_SUITE(Shapes4D)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(shapes_4d_small,
+                                                                           data_types),
+                                                                           n_values),
+shape_in, axis, data_type, num_tensors)
+{
+    validate_configuration(shape_in, axis, data_type, num_tensors);
+}
+
+TEST_SUITE(S32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<int>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_4d_small,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<int>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_4d_large,
+                                       framework::dataset::make("DataType", { DataType::S32 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S32
+
+TEST_SUITE(S16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<short>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_4d_small,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<short>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_4d_large,
+                                       framework::dataset::make("DataType", { DataType::S16 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(S8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLStackLayerFixture<char>, framework::DatasetMode::ALL,
+                       combine(combine(shapes_4d_small,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLStackLayerFixture<char>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(shapes_4d_large,
+                                       framework::dataset::make("DataType", { DataType::S8 })),
+                                       n_values))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S8
+TEST_SUITE_END() // Shapes4D
+TEST_SUITE_END() // StackLayer
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/StackLayerFixture.h b/tests/validation/fixtures/StackLayerFixture.h
new file mode 100644
index 0000000000..cab4350787
--- /dev/null
+++ b/tests/validation/fixtures/StackLayerFixture.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/StackLayer.h"
+#include "tests/validation/reference/Utils.h"
+
+#include <random>
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::misc::shape_calculator;
+
+/** Fixture validating a stack-layer function against the reference implementation.
+ *
+ * @tparam TensorType         Backend tensor type (e.g. CLTensor)
+ * @tparam AbstractTensorType Backend tensor interface type (e.g. ICLTensor)
+ * @tparam AccessorType       Accessor used to read/write backend tensors
+ * @tparam FunctionType       Function under test (e.g. CLStackLayer)
+ * @tparam T                  Element type
+ */
+template <typename TensorType, typename AbstractTensorType, typename AccessorType, typename FunctionType, typename T>
+class StackLayerValidationFixture : public framework::Fixture
+{
+public:
+    /** Run the function under test and the reference implementation.
+     *
+     * @param shape_src   Shape of each input tensor to stack
+     * @param axis        Stack axis (negative values are wrapped around)
+     * @param data_type   Data type of the tensors
+     * @param num_tensors Number of tensors to stack
+     */
+    template <typename...>
+    void setup(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+    {
+        _target    = compute_target(shape_src, axis, data_type, num_tensors);
+        _reference = compute_reference(shape_src, axis, data_type, num_tensors);
+    }
+
+protected:
+    // Fill a tensor with uniformly distributed values; @p i seeds the
+    // generator so each input tensor receives distinct data.
+    template <typename U>
+    void fill(U &&tensor, unsigned int i)
+    {
+        library->fill_tensor_uniform(tensor, i);
+    }
+
+    // Configure, allocate and run the function under test; returns its output.
+    TensorType compute_target(TensorShape shape_src, int axis, DataType data_type, int num_tensors)
+    {
+        std::vector<TensorType>           tensors(num_tensors);
+        std::vector<AbstractTensorType *> src(num_tensors);
+
+        // Create vector of input tensors
+        for(int i = 0; i < num_tensors; ++i)
+        {
+            tensors[i] = create_tensor<TensorType>(shape_src, data_type);
+            src[i]     = &(tensors[i]);
+            ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        // Create output tensor. Use the TensorType template parameter rather
+        // than a backend-specific type so the fixture works for any backend.
+        // The output tensor will be auto-initialized within the function.
+        TensorType dst;
+
+        // Create and configure function
+        FunctionType stack;
+        stack.configure(src, axis, &dst);
+
+        // Allocate and fill the input tensors
+        for(int i = 0; i < num_tensors; ++i)
+        {
+            ARM_COMPUTE_EXPECT(tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+            tensors[i].allocator()->allocate();
+            ARM_COMPUTE_EXPECT(!tensors[i].info()->is_resizable(), framework::LogLevel::ERRORS);
+
+            // Fill input tensor
+            fill(AccessorType(tensors[i]), i);
+        }
+
+        // Allocate output tensor
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Compute stack function
+        stack.run();
+
+        return dst;
+    }
+
+    // Build the same inputs as compute_target() and run the reference stack.
+    SimpleTensor<T> compute_reference(const TensorShape &shape_src, int axis, DataType data_type, int num_tensors)
+    {
+        std::vector<SimpleTensor<T>> src;
+
+        for(int i = 0; i < num_tensors; ++i)
+        {
+            // Construct in place; moving a temporary here would be a pessimization
+            src.emplace_back(shape_src, data_type, 1);
+
+            fill(src[i], i);
+        }
+
+        // Wrap around negative values
+        const unsigned int axis_u = wrap_around(axis, static_cast<int>(shape_src.num_dimensions() + 1));
+
+        const TensorShape shape_dst = compute_stack_shape(TensorInfo(shape_src, 1, data_type), axis_u, num_tensors);
+
+        return reference::stack_layer<T>(src, shape_dst, data_type, axis_u);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_STACK_LAYER_FIXTURE */
diff --git a/tests/validation/reference/StackLayer.cpp b/tests/validation/reference/StackLayer.cpp
new file mode 100644
index 0000000000..50e440c914
--- /dev/null
+++ b/tests/validation/reference/StackLayer.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "StackLayer.h"
+
+#include "arm_compute/core/Types.h"
+
+#include "tests/validation/Helpers.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+/** Reference implementation of the stack layer.
+ *
+ * Stacks the in.size() input tensors (all of identical shape, rank <= 4)
+ * along dimension @p axis of the output, which therefore has one more
+ * dimension than the inputs.
+ *
+ * @param in           Input tensors to stack (at least 2, same shape)
+ * @param output_shape Shape of the stacked output (rank <= 5)
+ * @param data_type    Data type of the output tensor
+ * @param axis         Output dimension along which the inputs are stacked
+ *                     (already wrapped, i.e. non-negative)
+ *
+ * @return The stacked output tensor
+ */
+template <typename T>
+SimpleTensor<T> stack_layer(const std::vector<SimpleTensor<T>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis)
+{
+    ARM_COMPUTE_ERROR_ON(output_shape.num_dimensions() > 5);
+    ARM_COMPUTE_ERROR_ON(in.size() < 2);
+    ARM_COMPUTE_ERROR_ON(axis > in[0].shape().num_dimensions());
+
+    SimpleTensor<T> out{ output_shape, data_type };
+
+    // NOTE(review): for inputs of rank < 4 the missing dimensions presumably
+    // have extent 1 so the corresponding loops below run exactly once --
+    // confirm TensorShape's behaviour for unset dimensions.
+    const int width = in[0].shape()[0];
+    const int height = in[0].shape()[1];
+    const int depth = in[0].shape()[2];
+    const int batch_size = in[0].shape()[3];
+    const int num_tensors = in.size();
+
+    // Array to store the input coordinates
+    // i_coordinates[0] = xi, i_coordinates[1] = yi, i_coordinates[2] = zi
+    // i_coordinates[3] = bi, i_coordinates[4] = i, i_coordinates[5] = 0
+    // i_coordinates[5] will be always zero and used for not incrementing the output when the input has less than 4 dimensions
+    int i_coordinates[6] = { 0 };
+
+    // Array of pointers used to map the output coordinates to the input ones accordingly with the axis
+    // This array is initialized with &i_coordinates[5] since this will be always zero
+    int *o_coordinates[5] = { &i_coordinates[5], &i_coordinates[5], &i_coordinates[5], &i_coordinates[5], &i_coordinates[5] };
+
+    // Set the axis coordinate: the stack dimension of the output follows the
+    // tensor index i (i_coordinates[4])
+    o_coordinates[axis] = &i_coordinates[4];
+
+    unsigned int k_shift = 0;
+
+    // Map the output coordinates: input dimension k maps to output dimension
+    // k, shifted up by one once the stack axis has been passed
+    for(unsigned int k = 0; k < in[0].shape().num_dimensions(); ++k)
+    {
+        if(k == axis)
+        {
+            k_shift++;
+        }
+
+        o_coordinates[k + k_shift] = &i_coordinates[k];
+    }
+
+    // Use alias for the input coordinates
+    int &xi = i_coordinates[0];
+    int &yi = i_coordinates[1];
+    int &zi = i_coordinates[2];
+    int &bi = i_coordinates[3];
+    int &i = i_coordinates[4];
+
+    // Use alias for the output coordinates
+    int &xo = *(o_coordinates[0]);
+    int &yo = *(o_coordinates[1]);
+    int &zo = *(o_coordinates[2]);
+    int &bo = *(o_coordinates[3]);
+    int &wo = *(o_coordinates[4]);
+
+    // Stack tensors: incrementing an input coordinate moves the aliased
+    // output coordinate at the same time, so a single element copy per
+    // innermost iteration is sufficient
+    for(; i < num_tensors; ++(i))
+    {
+        bi = 0;
+        for(; bi < batch_size; ++(bi))
+        {
+            zi = 0;
+            for(; zi < depth; ++(zi))
+            {
+                yi = 0;
+                for(; yi < height; ++(yi))
+                {
+                    xi = 0;
+                    for(; xi < width; ++(xi))
+                    {
+                        *(reinterpret_cast<T *>(out(Coordinates(xo, yo, zo, bo, wo)))) = *(reinterpret_cast<const T *>(in[i](Coordinates(xi, yi, zi, bi))));
+                    }
+                }
+            }
+        }
+    }
+
+    return out;
+}
+// Explicit instantiations for the data types exercised by the validation suites
+template SimpleTensor<int> stack_layer(const std::vector<SimpleTensor<int>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+template SimpleTensor<short> stack_layer(const std::vector<SimpleTensor<short>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+template SimpleTensor<char> stack_layer(const std::vector<SimpleTensor<char>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/StackLayer.h b/tests/validation/reference/StackLayer.h
new file mode 100644
index 0000000000..453f176a9d
--- /dev/null
+++ b/tests/validation/reference/StackLayer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_STACK_LAYER_H__
+#define __ARM_COMPUTE_TEST_STACK_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+/** Reference stack layer: stacks the tensors in @p in along dimension @p axis of the output.
+ *
+ * @param[in] in           Input tensors (all with the same shape)
+ * @param[in] output_shape Shape of the stacked output
+ * @param[in] data_type    Data type of the output tensor
+ * @param[in] axis         Output dimension along which the inputs are stacked (non-negative)
+ *
+ * @return The stacked output tensor
+ */
+// NOTE(review): this header uses std::vector but does not include <vector>
+// directly; it relies on a transitive include -- consider adding it.
+template <typename T>
+SimpleTensor<T> stack_layer(const std::vector<SimpleTensor<T>> &in, const TensorShape &output_shape, DataType data_type, unsigned int axis);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_STACK_LAYER_H__ */