author     Michalis Spyrou <michalis.spyrou@arm.com>    2018-05-09 09:59:23 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:52:35 +0000
commit     55b3d1216b4011d86d5f06335e518dc924987ae5 (patch)
tree       a94ca420f4385ff301a770fbeb5b4f930ac60105 /tests/validation
parent     a8aef2916379402e241d9f2c5e0faf3f99c860f7 (diff)
download   ComputeLibrary-55b3d1216b4011d86d5f06335e518dc924987ae5.tar.gz
COMPMID-1137 OpenCL concatenate width
Change-Id: I40faba421281b1cf080fa6a825d04a4366cdaeb0
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130700
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--  tests/validation/CL/WidthConcatenateLayer.cpp              176
-rw-r--r--  tests/validation/Helpers.cpp                                 15
-rw-r--r--  tests/validation/Helpers.h                                    8
-rw-r--r--  tests/validation/fixtures/WidthConcatenateLayerFixture.h   160
-rw-r--r--  tests/validation/reference/WidthConcatenateLayer.cpp        93
-rw-r--r--  tests/validation/reference/WidthConcatenateLayer.h          45
6 files changed, 497 insertions, 0 deletions
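
For orientation: CLWidthConcatenateLayer concatenates its input tensors along the x (width) dimension. The following is a minimal usage sketch of the configure/run flow exercised by the Configuration test case below; the tensor shapes are illustrative and a working OpenCL context is assumed.

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h"

    using namespace arm_compute;

    void concat_width_example()
    {
        // Initialise the OpenCL scheduler (context, queue, kernel library)
        CLScheduler::get().default_init();

        // Two inputs that agree on every dimension except the width (x)
        CLTensor src1, src2, dst;
        src1.allocator()->init(TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32));
        src2.allocator()->init(TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32));

        // dst is auto-initialised by configure() to shape (75U, 27U, 5U)
        CLWidthConcatenateLayer concat;
        concat.configure({ &src1, &src2 }, &dst);

        src1.allocator()->allocate();
        src2.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src1 and src2 ...

        // Enqueue the concatenation kernel
        concat.run();
    }
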
diff --git a/tests/validation/CL/WidthConcatenateLayer.cpp b/tests/validation/CL/WidthConcatenateLayer.cpp
new file mode 100644
index 0000000000..0ff95df957
--- /dev/null
+++ b/tests/validation/CL/WidthConcatenateLayer.cpp
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/WidthConcatenateLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(WidthConcatenateLayer)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching data type input/output
+ TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching y dimension
+ TensorInfo(TensorShape(23U, 27U, 5U), 1, DataType::F32, 0), // Mismatching total width
+ TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0)
+ }),
+ framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(24U, 27U, 4U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(52U, 27U, 5U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(16U, 27U, 5U), 1, DataType::F32, 0)
+ })),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(47U, 27U, 5U), 1, DataType::F16, 0),
+ TensorInfo(TensorShape(75U, 12U, 5U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(11U, 27U, 5U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(32U, 27U, 5U), 1, DataType::F32, 0)
+ })),
+ framework::dataset::make("Expected", { false, false, false, true })),
+                                                        input_info1, input_info2, output_info, expected)
+{
+ std::vector<TensorInfo> inputs_vector_info;
+ inputs_vector_info.emplace_back(std::move(input_info1));
+ inputs_vector_info.emplace_back(std::move(input_info2));
+
+ std::vector<ITensorInfo *> inputs_vector_info_raw;
+ for(auto &input : inputs_vector_info)
+ {
+ inputs_vector_info_raw.emplace_back(&input);
+ }
+
+ bool is_valid = bool(CLWidthConcatenateLayer::validate(inputs_vector_info_raw,
+ &output_info.clone()->set_is_resizable(false)));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_CASE(Configuration, framework::DatasetMode::ALL)
+{
+ // Create tensors
+ CLTensor src1 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+ CLTensor src2 = create_tensor<CLTensor>(TensorShape(32U, 32U, 32U), DataType::F32, 1);
+ CLTensor src3 = create_tensor<CLTensor>(TensorShape(15U, 32U, 32U), DataType::F32, 1);
+ CLTensor dst;
+
+ ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src3.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLWidthConcatenateLayer concat_layer;
+
+ concat_layer.configure({ &src1, &src2, &src3 }, &dst);
+}
+
+template <typename T>
+using CLWidthConcatenateLayerFixture = WidthConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLWidthConcatenateLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+ DataType::F16)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
+ DataType::F16)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWidthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::F32)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
+ framework::dataset::make("DataType",
+ DataType::QS8)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(),
+ framework::dataset::make("DataType",
+ DataType::QS8)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(RunTiny, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Tiny2DShapes(),
+ framework::dataset::make("DataType",
+ DataType::QS16)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWidthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::WidthConcatenateLayerShapes(),
+ framework::dataset::make("DataType",
+ DataType::QS16)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 0707c6a247..25dc6c568d 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -98,6 +98,21 @@ TensorShape calculate_depth_concatenate_shape(const std::vector<TensorShape> &in
return out_shape;
}
+TensorShape calculate_width_concatenate_shape(const std::vector<TensorShape> &input_shapes)
+{
+ ARM_COMPUTE_ERROR_ON(input_shapes.empty());
+
+ TensorShape out_shape = input_shapes[0];
+
+ int width = std::accumulate(input_shapes.begin(), input_shapes.end(), 0, [](int sum, const TensorShape & shape)
+ {
+ return sum + shape.x();
+ });
+ out_shape.set(0, width);
+
+ return out_shape;
+}
+
HarrisCornersParameters harris_corners_parameters()
{
HarrisCornersParameters params;
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index b5597090c6..d07803fb94 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -148,6 +148,14 @@ void fill_mask_from_pattern(uint8_t *mask, int cols, int rows, MatrixPattern pat
*/
TensorShape calculate_depth_concatenate_shape(const std::vector<TensorShape> &input_shapes);
+/** Calculate the output tensor shape, given a vector of input tensor shapes to concatenate
+ *
+ * @param[in] input_shapes Shapes of the tensors to concatenate across width.
+ *
+ * @return The shape of the concatenated output tensor.
+ */
+TensorShape calculate_width_concatenate_shape(const std::vector<TensorShape> &input_shapes);
+
/** Parameters of Harris Corners algorithm. */
struct HarrisCornersParameters
{
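
The new helper complements calculate_depth_concatenate_shape: the output width is the sum of the input widths, while the remaining dimensions are taken from the first shape. Below is a small sketch of that arithmetic, reusing shapes from the Validate dataset above; the assert is only for demonstration and the snippet assumes it is built against the test helpers.

    #include "arm_compute/core/TensorShape.h"
    #include "tests/validation/Helpers.h"

    #include <cassert>
    #include <vector>

    void width_concat_shape_example()
    {
        using namespace arm_compute;
        // Widths add up, the remaining dimensions come from the first shape:
        // (23, 27, 5) + (52, 27, 5) -> (75, 27, 5)
        std::vector<TensorShape> in = { TensorShape(23U, 27U, 5U), TensorShape(52U, 27U, 5U) };
        TensorShape out = test::validation::calculate_width_concatenate_shape(in);
        assert(out.x() == 75U && out.y() == 27U && out.z() == 5U);
    }
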
diff --git a/tests/validation/fixtures/WidthConcatenateLayerFixture.h b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
new file mode 100644
index 0000000000..cf9b12eab6
--- /dev/null
+++ b/tests/validation/fixtures/WidthConcatenateLayerFixture.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/WidthConcatenateLayer.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
+class WidthConcatenateLayerValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, DataType data_type)
+ {
+ // Create input shapes
+ std::mt19937 gen(library->seed());
+ std::uniform_int_distribution<> num_dis(2, 4);
+ const int num_tensors = num_dis(gen);
+
+ std::vector<TensorShape> shapes(num_tensors, shape);
+ std::bernoulli_distribution mutate_dis(0.5f);
+ std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
+
+ // Generate more shapes based on the input
+ for(auto &s : shapes)
+ {
+ // Randomly change the first dimension
+ if(mutate_dis(gen))
+ {
+ // Decrease the dimension by a small percentage. Don't increase
+                // as that could make the tensor too large.
+ s.set(0, s[0] + 2 * static_cast<int>(s[0] * change_dis(gen)));
+ }
+ }
+
+ _target = compute_target(shapes, data_type);
+ _reference = compute_reference(shapes, data_type);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+
+ TensorType compute_target(std::vector<TensorShape> shapes, DataType data_type)
+ {
+ std::vector<TensorType> srcs;
+ std::vector<ITensorType *> src_ptrs;
+
+ // Create tensors
+ srcs.reserve(shapes.size());
+
+ for(const auto &shape : shapes)
+ {
+ srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+ src_ptrs.emplace_back(&srcs.back());
+ }
+
+ TensorShape dst_shape = misc::shape_calculator::calculate_width_concatenate_shape(src_ptrs);
+ TensorType dst = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+
+ // Create and configure function
+ FunctionType width_concat;
+ width_concat.configure(src_ptrs, &dst);
+
+ for(auto &src : srcs)
+ {
+ ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ for(auto &src : srcs)
+ {
+ src.allocator()->allocate();
+ ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ dst.allocator()->allocate();
+ ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ int i = 0;
+ for(auto &src : srcs)
+ {
+ fill(AccessorType(src), i++);
+ }
+
+ // Compute function
+ width_concat.run();
+
+ return dst;
+ }
+
+ SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, DataType data_type)
+ {
+ std::vector<SimpleTensor<T>> srcs;
+
+ // Create and fill tensors
+ int i = 0;
+ for(const auto &shape : shapes)
+ {
+ srcs.emplace_back(shape, data_type, 1, _fractional_bits);
+ fill(srcs.back(), i++);
+ }
+
+ return reference::widthconcatenate_layer<T>(srcs);
+ }
+
+ TensorType _target{};
+ SimpleTensor<T> _reference{};
+
+private:
+ int _fractional_bits{ 1 };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_FIXTURE */
diff --git a/tests/validation/reference/WidthConcatenateLayer.cpp b/tests/validation/reference/WidthConcatenateLayer.cpp
new file mode 100644
index 0000000000..fe79b4a138
--- /dev/null
+++ b/tests/validation/reference/WidthConcatenateLayer.cpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "WidthConcatenateLayer.h"
+
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
+{
+ // Create reference
+ std::vector<TensorShape> shapes;
+
+ for(const auto &src : srcs)
+ {
+ shapes.emplace_back(src.shape());
+ }
+
+ DataType dst_type = srcs.empty() ? DataType::UNKNOWN : srcs[0].data_type();
+ TensorShape dst_shape = calculate_width_concatenate_shape(shapes);
+ SimpleTensor<T> dst(dst_shape, dst_type);
+
+ // Compute reference
+ int width_offset = 0;
+ const int width_out = dst.shape().x();
+
+ // Set output tensor to 0
+ std::fill_n(dst.data(), dst.num_elements(), 0);
+
+ for(const auto &src : srcs)
+ {
+ ARM_COMPUTE_ERROR_ON(width_offset >= width_out);
+
+ const int width = src.shape().x();
+ const int height = src.shape().y();
+ const int depth = src.shape().z();
+
+ const T *src_ptr = src.data();
+ T *dst_ptr = dst.data();
+
+ for(int d = 0; d < depth; ++d)
+ {
+ for(int r = 0; r < height; ++r)
+ {
+ int offset = d * height + r;
+ std::copy(src_ptr, src_ptr + width, dst_ptr + width_offset + offset * width_out);
+ src_ptr += width;
+ }
+ }
+
+ width_offset += width;
+ }
+
+ return dst;
+}
+
+template SimpleTensor<float> widthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
+template SimpleTensor<half> widthconcatenate_layer(const std::vector<SimpleTensor<half>> &srcs);
+template SimpleTensor<qint8_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
+template SimpleTensor<qint16_t> widthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
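
To make the indexing in the reference loop concrete: row (d, r) of a source starts at element (d * height + r) * width_out + width_offset in the destination, where width_offset is the total width of the sources already copied. A self-contained sketch of the same copy pattern using plain std::vector, with illustrative dimensions:

    #include <algorithm>
    #include <vector>

    int main()
    {
        const int height = 2, depth = 1;
        std::vector<float> src1 = { 1, 2, 3, 4, 5, 6 }; // width 3, row-major
        std::vector<float> src2 = { 7, 8, 9, 10 };      // width 2, row-major
        const int width_out = 3 + 2;
        std::vector<float> dst(width_out * height * depth, 0.f);

        int width_offset = 0;
        for(const auto *src : { &src1, &src2 })
        {
            const int    width   = static_cast<int>(src->size()) / (height * depth);
            const float *src_ptr = src->data();
            for(int d = 0; d < depth; ++d)
            {
                for(int r = 0; r < height; ++r)
                {
                    // Row (d, r) of this source lands in the matching output row,
                    // shifted right by the widths of the sources copied so far.
                    const int offset = d * height + r;
                    std::copy(src_ptr, src_ptr + width, dst.data() + width_offset + offset * width_out);
                    src_ptr += width;
                }
            }
            width_offset += width;
        }
        // dst is now { 1, 2, 3, 7, 8,   4, 5, 6, 9, 10 }
        return 0;
    }
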
diff --git a/tests/validation/reference/WidthConcatenateLayer.h b/tests/validation/reference/WidthConcatenateLayer.h
new file mode 100644
index 0000000000..237e72b947
--- /dev/null
+++ b/tests/validation/reference/WidthConcatenateLayer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_H__
+#define __ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> widthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_WIDTHCONCATENATE_LAYER_H__ */