aboutsummaryrefslogtreecommitdiff
path: root/tests/validation
diff options
context:
space:
mode:
authorIoan-Cristian Szabo <ioan-cristian.szabo@arm.com>2017-11-16 17:55:03 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:49:16 +0000
commitae3c8abdd9cf474d39991863f9170257f4a28ff2 (patch)
tree2447fe28aeb0131e0cdd6c6b8101127396061028 /tests/validation
parent156fcf3f36f6168e47d65db167bba3af5037e3d9 (diff)
downloadComputeLibrary-ae3c8abdd9cf474d39991863f9170257f4a28ff2.tar.gz
COMPMID-584: Add validation to channel_combine kernels
Change-Id: I67fe3fcea08704d9f4b04d22fe34db83b2697b87 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110562 Reviewed-by: Pablo Tello <pablo.tello@arm.com> Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--tests/validation/CL/ChannelCombine.cpp160
-rw-r--r--tests/validation/NEON/ChannelCombine.cpp159
-rw-r--r--tests/validation/fixtures/ChannelCombineFixture.h266
-rw-r--r--tests/validation/reference/ChannelCombine.cpp201
-rw-r--r--tests/validation/reference/ChannelCombine.h43
5 files changed, 829 insertions, 0 deletions
diff --git a/tests/validation/CL/ChannelCombine.cpp b/tests/validation/CL/ChannelCombine.cpp
new file mode 100644
index 0000000000..fd9049a46a
--- /dev/null
+++ b/tests/validation/CL/ChannelCombine.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLMultiImage.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLChannelCombine.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ConvertPolicyDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ChannelCombineFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// Configure-only smoke test: builds a CLChannelCombine for the given shape/format
+// pair without allocating backing memory or running the function.
+inline void validate_configuration(const TensorShape &shape, Format format)
+{
+ const int num_planes = num_planes_from_format(format);
+
+ // Create tensors
+ CLMultiImage dst = create_multi_image<CLMultiImage>(shape, format);
+ std::vector<CLTensor> ref_src = create_tensor_planes<CLTensor>(shape, format);
+
+ // Create and configure function
+ CLChannelCombine channel_combine;
+
+ if(num_planes == 1)
+ {
+ // Single-plane output: RGBA8888 needs a fourth (alpha) source plane, others pass nullptr.
+ const CLTensor *tensor_extra = ((Format::RGBA8888 == format) ? &ref_src[3] : nullptr);
+
+ channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, dst.cl_plane(0));
+ }
+ else
+ {
+ // Multi-planar output: combine straight into the multi-image.
+ channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
+ }
+
+ // TODO(bsgcomp): Add validation for padding and shape (COMPMID-659)
+}
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(ChannelCombine)
+
+// Bind the generic channel-combine fixture to the OpenCL backend types.
+template <typename T>
+using CLChannelCombineFixture = ChannelCombineValidationFixture<CLMultiImage, CLTensor, CLAccessor, CLChannelCombine, T>;
+
+// Configure-only checks over both small and large 2D shapes, grouped by format family.
+TEST_SUITE(Configuration)
+DATA_TEST_CASE(RGBA, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })),
+ shape, format)
+{
+ validate_configuration(shape, format);
+}
+DATA_TEST_CASE(YUV, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })),
+ shape, format)
+{
+ validate_configuration(shape, format);
+}
+
+DATA_TEST_CASE(YUVPlanar, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::IYUV, Format::YUV444, Format::NV12, Format::NV21 })),
+ shape, format)
+{
+ validate_configuration(shape, format);
+}
+TEST_SUITE_END()
+
+// Full run tests: execute the function and compare every destination plane
+// against the reference implementation.
+TEST_SUITE(RGBA)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUV)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUVPlanar)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/ChannelCombine.cpp b/tests/validation/NEON/ChannelCombine.cpp
new file mode 100644
index 0000000000..3dbc64dbd2
--- /dev/null
+++ b/tests/validation/NEON/ChannelCombine.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/MultiImage.h"
+#include "arm_compute/runtime/NEON/functions/NEChannelCombine.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ConvertPolicyDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ChannelCombineFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// Configure-only smoke test: builds an NEChannelCombine for the given shape/format
+// pair without allocating backing memory or running the function.
+inline void validate_configuration(const TensorShape &shape, Format format)
+{
+ const int num_planes = num_planes_from_format(format);
+
+ // Create tensors
+ MultiImage dst = create_multi_image<MultiImage>(shape, format);
+ std::vector<Tensor> ref_src = create_tensor_planes<Tensor>(shape, format);
+
+ // Create and configure function
+ NEChannelCombine channel_combine;
+
+ if(num_planes == 1)
+ {
+ // Single-plane output: RGBA8888 needs a fourth (alpha) source plane, others pass nullptr.
+ const Tensor *tensor_extra = Format::RGBA8888 == format ? &ref_src[3] : nullptr;
+
+ channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, dst.plane(0));
+ }
+ else
+ {
+ // Multi-planar output: combine straight into the multi-image.
+ channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
+ }
+
+ // TODO(bsgcomp): Add validation for padding and shape (COMPMID-659)
+}
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(ChannelCombine)
+
+// Configure-only checks over both small and large 2D shapes, grouped by format family.
+TEST_SUITE(Configuration)
+DATA_TEST_CASE(RGBA, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })),
+ shape, format)
+{
+ validate_configuration(shape, format);
+}
+DATA_TEST_CASE(YUV, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })),
+ shape, format)
+{
+ validate_configuration(shape, format);
+}
+
+DATA_TEST_CASE(YUVPlanar, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::IYUV, Format::YUV444, Format::NV12, Format::NV21 })),
+ shape, format)
+{
+ validate_configuration(shape, format);
+}
+TEST_SUITE_END()
+
+// Bind the generic channel-combine fixture to the NEON backend types.
+template <typename T>
+using NEChannelCombineFixture = ChannelCombineValidationFixture<MultiImage, Tensor, Accessor, NEChannelCombine, T>;
+
+// Full run tests: execute the function and compare every destination plane
+// against the reference implementation.
+TEST_SUITE(RGBA)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUV)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUVPlanar)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+ // Validate output
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+ }
+}
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/ChannelCombineFixture.h b/tests/validation/fixtures/ChannelCombineFixture.h
new file mode 100644
index 0000000000..68d023715c
--- /dev/null
+++ b/tests/validation/fixtures/ChannelCombineFixture.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE
+#define ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ChannelCombine.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+// Creates the per-channel source planes (U8 each) for a given destination format.
+// Plane shapes follow the format's chroma sub-sampling; tensors are initialised
+// but not allocated.
+template <typename TensorType>
+inline std::vector<TensorType> create_tensor_planes(const TensorShape &shape, Format format)
+{
+ TensorShape image_shape = adjust_odd_shape(shape, format);
+ TensorInfo info(image_shape, Format::U8);
+
+ std::vector<TensorType> tensor_planes;
+
+ switch(format)
+ {
+ case Format::RGB888:
+ case Format::RGBA8888:
+ case Format::YUV444:
+ {
+ // Full-resolution planes: 3 channels, plus alpha for RGBA8888.
+ tensor_planes.resize(3);
+
+ if(format == Format::RGBA8888)
+ {
+ tensor_planes.resize(4);
+ }
+
+ for(unsigned int plane_idx = 0; plane_idx < tensor_planes.size(); ++plane_idx)
+ {
+ tensor_planes[plane_idx].allocator()->init(info);
+ }
+
+ break;
+ }
+ case Format::YUYV422:
+ case Format::UYVY422:
+ {
+ // 4:2:2 interleaved: U and V planes are horizontally sub-sampled.
+ const TensorShape uv_shape = calculate_subsampled_shape(image_shape, format);
+ const TensorInfo info_hor2(uv_shape, Format::U8);
+
+ tensor_planes.resize(3);
+
+ tensor_planes[0].allocator()->init(info);
+ tensor_planes[1].allocator()->init(info_hor2);
+ tensor_planes[2].allocator()->init(info_hor2);
+ break;
+ }
+ case Format::NV12:
+ case Format::NV21:
+ case Format::IYUV:
+ {
+ // 4:2:0 planar/semi-planar: U and V planes sub-sampled in both dimensions.
+ const TensorShape sub2_shape = calculate_subsampled_shape(image_shape, format);
+ const TensorInfo info_sub2(sub2_shape, Format::U8);
+
+ tensor_planes.resize(3);
+
+ tensor_planes[0].allocator()->init(info);
+ tensor_planes[1].allocator()->init(info_sub2);
+ tensor_planes[2].allocator()->init(info_sub2);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
+
+ return tensor_planes;
+}
+} // namespace
+
+// Backend-agnostic validation fixture for channel-combine: runs the target
+// function (CL or NEON, selected via template parameters) and the reference
+// implementation on identically-seeded source planes, storing both results
+// for per-plane comparison in the test body.
+template <typename MultiImageType, typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ChannelCombineValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(TensorShape shape, Format format)
+ {
+ _num_planes = num_planes_from_format(format);
+ _target = compute_target(shape, format);
+ _reference = compute_reference(shape, format);
+ }
+
+protected:
+ // Fills a tensor with uniformly-distributed values; 'i' seeds the generator,
+ // so matching plane indices on target and reference receive identical data.
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ library->fill_tensor_uniform(tensor, i);
+ }
+
+ // Builds the SimpleTensor source planes for the reference path, mirroring
+ // the shapes produced by create_tensor_planes for the target path.
+ template <typename U>
+ std::vector<SimpleTensor<U>> create_tensor_planes_reference(const TensorShape &shape, Format format)
+ {
+ std::vector<SimpleTensor<U>> tensor_planes;
+
+ TensorShape image_shape = adjust_odd_shape(shape, format);
+
+ switch(format)
+ {
+ case Format::RGB888:
+ case Format::RGBA8888:
+ case Format::YUV444:
+ {
+ // All planes share the same full-resolution U8 shape, so the extra
+ // RGBA8888 plane being emplaced first does not change any plane's layout.
+ if(format == Format::RGBA8888)
+ {
+ tensor_planes.emplace_back(image_shape, Format::U8);
+ }
+
+ tensor_planes.emplace_back(image_shape, Format::U8);
+ tensor_planes.emplace_back(image_shape, Format::U8);
+ tensor_planes.emplace_back(image_shape, Format::U8);
+ break;
+ }
+ case Format::YUYV422:
+ case Format::UYVY422:
+ {
+ // 4:2:2: chroma planes horizontally sub-sampled.
+ const TensorShape hor2_shape = calculate_subsampled_shape(image_shape, format);
+
+ tensor_planes.emplace_back(image_shape, Format::U8);
+ tensor_planes.emplace_back(hor2_shape, Format::U8);
+ tensor_planes.emplace_back(hor2_shape, Format::U8);
+ break;
+ }
+ case Format::NV12:
+ case Format::NV21:
+ case Format::IYUV:
+ {
+ // 4:2:0: chroma planes sub-sampled in both dimensions.
+ const TensorShape shape_sub2 = calculate_subsampled_shape(image_shape, format);
+
+ tensor_planes.emplace_back(image_shape, Format::U8);
+ tensor_planes.emplace_back(shape_sub2, Format::U8);
+ tensor_planes.emplace_back(shape_sub2, Format::U8);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
+
+ return tensor_planes;
+ }
+
+ // Configures, allocates, fills and runs the backend function; returns the
+ // combined multi-image.
+ MultiImageType compute_target(const TensorShape &shape, Format format)
+ {
+ // Create tensors
+ std::vector<TensorType> ref_src = create_tensor_planes<TensorType>(shape, format);
+ MultiImageType dst = create_multi_image<MultiImageType>(shape, format);
+
+ // Create and configure function
+ FunctionType channel_combine;
+
+ if(1 == _num_planes)
+ {
+ // Single-plane output: RGBA8888 supplies a fourth (alpha) source plane.
+ const TensorType *tensor_extra = ((Format::RGBA8888 == format) ? &ref_src[3] : nullptr);
+ TensorType *tensor_dst = dynamic_cast<TensorType *>(dst.plane(0));
+
+ channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, tensor_dst);
+ }
+ else
+ {
+ channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
+ }
+
+ // All tensors must still be resizable before allocation.
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ const TensorType *dst_plane = static_cast<const TensorType *>(dst.plane(plane_idx));
+
+ ARM_COMPUTE_EXPECT(dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+ {
+ ARM_COMPUTE_EXPECT(ref_src[plane_idx].info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ // Allocate tensors
+ dst.allocate();
+
+ for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+ {
+ ref_src[plane_idx].allocator()->allocate();
+ }
+
+ // ...and no longer resizable afterwards.
+ for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+ {
+ const TensorType *dst_plane = static_cast<const TensorType *>(dst.plane(plane_idx));
+
+ ARM_COMPUTE_EXPECT(!dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+ {
+ ARM_COMPUTE_EXPECT(!ref_src[plane_idx].info()->is_resizable(), framework::LogLevel::ERRORS);
+ }
+
+ // Fill tensor planes
+ for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+ {
+ fill(AccessorType(ref_src[plane_idx]), plane_idx);
+ }
+
+ // Compute function
+ channel_combine.run();
+
+ return dst;
+ }
+
+ // Runs the reference implementation on identically-seeded planes.
+ std::vector<SimpleTensor<T>> compute_reference(const TensorShape &shape, Format format)
+ {
+ // Create reference
+ std::vector<SimpleTensor<T>> ref_src = create_tensor_planes_reference<T>(shape, format);
+
+ // Fill references
+ for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+ {
+ fill(ref_src[plane_idx], plane_idx);
+ }
+
+ return reference::channel_combine<T>(shape, ref_src, format);
+ }
+
+ unsigned int _num_planes{}; // number of destination planes for the tested format
+ MultiImageType _target{}; // output of the backend function
+ std::vector<SimpleTensor<T>> _reference{}; // per-plane reference output
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE */
diff --git a/tests/validation/reference/ChannelCombine.cpp b/tests/validation/reference/ChannelCombine.cpp
new file mode 100644
index 0000000000..c1ec3ec578
--- /dev/null
+++ b/tests/validation/reference/ChannelCombine.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ChannelCombine.h"
+
+#include "arm_compute/core/Types.h"
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+namespace
+{
+// Creates the destination plane tensors for a combined image in the given
+// format: one interleaved plane for packed formats, two for semi-planar
+// NV12/NV21 (Y + interleaved UV88), three for fully planar IYUV/YUV444.
+template <typename T>
+inline std::vector<SimpleTensor<T>> create_image_planes(const TensorShape &shape, Format format)
+{
+ TensorShape image_shape = adjust_odd_shape(shape, format);
+
+ std::vector<SimpleTensor<T>> image_planes;
+
+ switch(format)
+ {
+ case Format::RGB888:
+ case Format::RGBA8888:
+ case Format::YUYV422:
+ case Format::UYVY422:
+ {
+ // Packed formats: a single multi-channel plane.
+ image_planes.emplace_back(image_shape, format);
+ break;
+ }
+ case Format::NV12:
+ case Format::NV21:
+ {
+ // Semi-planar: full-res Y plane plus a sub-sampled interleaved UV plane.
+ TensorShape shape_uv88 = calculate_subsampled_shape(image_shape, Format::UV88);
+
+ image_planes.emplace_back(image_shape, Format::U8);
+ image_planes.emplace_back(shape_uv88, Format::UV88);
+ break;
+ }
+ case Format::IYUV:
+ {
+ // Planar 4:2:0: full-res Y plus two sub-sampled chroma planes.
+ TensorShape shape_sub2 = calculate_subsampled_shape(image_shape, Format::IYUV);
+
+ image_planes.emplace_back(image_shape, Format::U8);
+ image_planes.emplace_back(shape_sub2, Format::U8);
+ image_planes.emplace_back(shape_sub2, Format::U8);
+ break;
+ }
+ case Format::YUV444:
+ {
+ // Planar 4:4:4: three full-resolution planes.
+ image_planes.emplace_back(image_shape, Format::U8);
+ image_planes.emplace_back(image_shape, Format::U8);
+ image_planes.emplace_back(image_shape, Format::U8);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
+
+ return image_planes;
+}
+} // namespace
+
+// Reference channel-combine: interleaves the separate source channel planes
+// into the destination plane(s) of the requested format.
+template <typename T>
+std::vector<SimpleTensor<T>> channel_combine(const TensorShape &shape, const std::vector<SimpleTensor<T>> &image_planes, Format format)
+{
+ std::vector<SimpleTensor<T>> dst = create_image_planes<T>(shape, format);
+
+ for(unsigned int plane_idx = 0; plane_idx < dst.size(); ++plane_idx)
+ {
+ SimpleTensor<T> &dst_tensor = dst[plane_idx];
+
+ for(int element_idx = 0; element_idx < dst_tensor.num_elements(); ++element_idx)
+ {
+ Coordinates coord = index2coord(dst_tensor.shape(), element_idx);
+
+ switch(format)
+ {
+ case Format::RGB888:
+ case Format::RGBA8888:
+ {
+ // Copy R/G/B or A channel
+ for(int channel_idx = 0; channel_idx < dst_tensor.num_channels(); ++channel_idx)
+ {
+ const T &src_value = reinterpret_cast<const T *>(image_planes[channel_idx](coord))[0];
+ T &dst_value = reinterpret_cast<T *>(dst_tensor(coord))[channel_idx];
+
+ dst_value = src_value;
+ }
+ break;
+ }
+ case Format::YUYV422:
+ case Format::UYVY422:
+ {
+ // Find coordinates of the sub-sampled pixel
+ const Coordinates coord_hori(coord.x() / 2, coord.y());
+
+ const T &src0 = reinterpret_cast<const T *>(image_planes[0](coord))[0];
+ const T &src1 = reinterpret_cast<const T *>(image_planes[1](coord_hori))[0];
+
+ // 'shift' selects the luma/chroma channel ordering: YUYV puts Y first,
+ // UYVY puts the chroma byte first.
+ const int shift = (Format::YUYV422 == format) ? 1 : 0;
+ T &dst0 = reinterpret_cast<T *>(dst_tensor(coord))[1 - shift];
+ T &dst1 = reinterpret_cast<T *>(dst_tensor(coord))[0 + shift];
+
+ dst0 = src0;
+ dst1 = src1;
+
+ // Two horizontal pixels share one U/V sample: consume the next element
+ // here (note ++element_idx advances the outer loop) and write its Y plus
+ // the shared V value.
+ Coordinates coord2 = index2coord(dst_tensor.shape(), ++element_idx);
+
+ const T &src2 = reinterpret_cast<const T *>(image_planes[0](coord2))[0];
+ const T &src3 = reinterpret_cast<const T *>(image_planes[2](coord_hori))[0];
+
+ T &dst2 = reinterpret_cast<T *>(dst_tensor(coord2))[1 - shift];
+ T &dst3 = reinterpret_cast<T *>(dst_tensor(coord2))[0 + shift];
+
+ dst2 = src2;
+ dst3 = src3;
+
+ break;
+ }
+ case Format::NV12:
+ case Format::NV21:
+ {
+ if(0U == plane_idx)
+ {
+ // Get and combine Y channel from plane0 of destination multi-image
+ dst_tensor[element_idx] = image_planes[0][element_idx];
+ }
+ else
+ {
+ // NV12 interleaves U before V in the chroma plane; NV21 the reverse.
+ const int shift = (Format::NV12 == format) ? 0 : 1;
+
+ // Get U channel from plane1 and V channel from plane2 of the source
+ const T &src_u0 = reinterpret_cast<const T *>(image_planes[1](coord))[0];
+ const T &src_v0 = reinterpret_cast<const T *>(image_planes[2](coord))[0];
+
+ // Get U and V channel from plane1 of destination multi-image
+ T &dst_u0 = reinterpret_cast<T *>(dst_tensor(coord))[0 + shift];
+ T &dst_v0 = reinterpret_cast<T *>(dst_tensor(coord))[1 - shift];
+
+ // Combine channel U and V
+ dst_u0 = src_u0;
+ dst_v0 = src_v0;
+ }
+
+ break;
+ }
+ case Format::IYUV:
+ case Format::YUV444:
+ {
+ // Fully planar formats: destination plane k is a straight copy of source plane k.
+ // Get Y/U/V element
+ const T &src = reinterpret_cast<const T *>(image_planes[plane_idx](coord))[0];
+ T &dst = reinterpret_cast<T *>(dst_tensor(coord))[0];
+
+ // Copy Y/U/V plane
+ dst = src;
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
+ }
+ }
+
+ return dst;
+}
+
+// Explicit instantiation for the only element type exercised by the tests.
+template std::vector<SimpleTensor<uint8_t>> channel_combine(const TensorShape &shape, const std::vector<SimpleTensor<uint8_t>> &image_planes, Format format);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/ChannelCombine.h b/tests/validation/reference/ChannelCombine.h
new file mode 100644
index 0000000000..cc6607de49
--- /dev/null
+++ b/tests/validation/reference/ChannelCombine.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_CHANNEL_COMBINE_H__
+#define __ARM_COMPUTE_TEST_CHANNEL_COMBINE_H__
+
+#include "tests/SimpleTensor.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+std::vector<SimpleTensor<T>> channel_combine(const TensorShape &shape, const std::vector<SimpleTensor<T>> &image_planes, Format format);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_CHANNEL_COMBINE_H__ */