author     Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>  2019-06-28 14:09:53 +0100
committer  VidhyaSudhan Loganathan <vidhyasudhan.loganathan@arm.com>   2019-06-28 14:15:30 +0000
commit     338595bca8ab60492f10626860acb1ab3722b1ce (patch)
tree       03504ec3a2973e30c80f9bf56b77b4a4c7c9d83c /tests
parent     7026b303d636e7639f8877ae8d5eff54f39c1121 (diff)
COMPMID-2234 : Add support for axis 3 in NE/CLConcatenateLayer
Change-Id: Ic86f89ece3afe72809bc69c6de6fee7d21daa1d4
Signed-off-by: Vidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1440
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
 -rw-r--r--  tests/validation/CL/BatchConcatenateLayer.cpp   | 170
 -rw-r--r--  tests/validation/NEON/BatchConcatenateLayer.cpp | 154
 -rw-r--r--  tests/validation/reference/ConcatenateLayer.cpp |  10
 3 files changed, 334 insertions(+), 0 deletions(-)
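
The patch adds tests for concatenation along axis 3, i.e. the batch dimension. As orientation before the diffs, here is a minimal usage sketch built around the same configure({ &src1, &src2, &src3 }, &dst, 3) call that the new Configuration test below makes. It is not part of the patch: the scheduler setup, tensor shapes and allocation order are assumptions chosen for illustration.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

using namespace arm_compute;

int main()
{
    // Assumption: a default OpenCL context/queue is sufficient for this sketch.
    CLScheduler::get().default_init();

    // Three inputs with identical W/H/C and a batch of 2 each (hypothetical shapes).
    CLTensor src1, src2, src3, dst;
    const TensorInfo info(TensorShape(16U, 16U, 8U, 2U), 1, DataType::F32);
    src1.allocator()->init(info);
    src2.allocator()->init(info);
    src3.allocator()->init(info);

    // Same call shape as the Configuration test in this patch: axis 3 selects the batch dimension.
    CLConcatenateLayer concat;
    concat.configure({ &src1, &src2, &src3 }, &dst, 3);

    src1.allocator()->allocate();
    src2.allocator()->allocate();
    src3.allocator()->allocate();
    dst.allocator()->allocate(); // assumption: configure() auto-initialises dst to (16, 16, 8, 6)

    concat.run();
    return 0;
}
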
diff --git a/tests/validation/CL/BatchConcatenateLayer.cpp b/tests/validation/CL/BatchConcatenateLayer.cpp
new file mode 100644
index 0000000000..b789569155
--- /dev/null
+++ b/tests/validation/CL/BatchConcatenateLayer.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ConcatenateLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(BatchConcatenateLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(20U, 27U, 4U, 4U), 1, DataType::F32), // Mismatching x dimension
+ TensorInfo(TensorShape(23U, 26U, 4U, 3U), 1, DataType::F32), // Mismatching y dim
+ TensorInfo(TensorShape(23U, 27U, 4U, 3U), 1, DataType::F32), // Mismatching z dim
+ TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+ }),
+ framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 3U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+ })),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F16),
+ TensorInfo(TensorShape(23U, 12U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 20U, 4U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 27U, 3U, 12U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
+ input_info1, input_info2, output_info, expected)
+{
+ std::vector<TensorInfo> inputs_vector_info;
+ inputs_vector_info.emplace_back(std::move(input_info1));
+ inputs_vector_info.emplace_back(std::move(input_info2));
+
+ std::vector<ITensorInfo *> inputs_vector_info_raw;
+ inputs_vector_info_raw.reserve(inputs_vector_info.size());
+ for(auto &input : inputs_vector_info)
+ {
+ inputs_vector_info_raw.emplace_back(&input);
+ }
+
+ bool is_valid = bool(CLConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 3));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_CASE(Configuration, framework::DatasetMode::ALL)
+{
+ // Create tensors
+ CLTensor src1 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+ CLTensor src2 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+ CLTensor src3 = create_tensor<CLTensor>(TensorShape(128U, 32U, 32U), DataType::F32, 1);
+ CLTensor dst;
+
+ ARM_COMPUTE_EXPECT(src1.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src2.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(src3.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ CLConcatenateLayer concat_layer;
+
+ concat_layer.configure({ &src1, &src2, &src3 }, &dst, 3);
+}
+template <typename T>
+using CLBatchConcatenateLayerFixture = ConcatenateLayerValidationFixture<CLTensor, ICLTensor, CLAccessor, CLConcatenateLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(concat(datasets::Large3DShapes(), datasets::Small4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
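
The Validate data test in the CL file above drives CLConcatenateLayer::validate() with pairs of TensorInfo objects and axis 3; only the last combination, two 16x27x3x6 inputs into a 16x27x3x12 output, is expected to pass. The sketch below shows the same up-front check outside the test framework, with the vector handling and shapes borrowed from that test. Treat it as an illustration under those assumptions, not a canonical snippet.

#include <vector>

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLConcatenateLayer.h"

using namespace arm_compute;

bool batch_concat_is_supported()
{
    // The passing combination from the Validate dataset: two batches of 6 concatenated into 12.
    TensorInfo in1(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32);
    TensorInfo in2(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32);
    TensorInfo out(TensorShape(16U, 27U, 3U, 12U), 1, DataType::F32);

    std::vector<ITensorInfo *> inputs{ &in1, &in2 };

    // Status converts to bool: true means this axis-3 configuration is supported.
    return bool(CLConcatenateLayer::validate(inputs, &out, 3));
}
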
diff --git a/tests/validation/NEON/BatchConcatenateLayer.cpp b/tests/validation/NEON/BatchConcatenateLayer.cpp
new file mode 100644
index 0000000000..f95663dbd3
--- /dev/null
+++ b/tests/validation/NEON/BatchConcatenateLayer.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ConcatenateLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(BatchConcatenateLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+ framework::dataset::make("InputInfo1", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32), // Mismatching data type input/output
+ TensorInfo(TensorShape(20U, 27U, 4U, 4U), 1, DataType::F32), // Mismatching x dimension
+ TensorInfo(TensorShape(23U, 26U, 4U, 3U), 1, DataType::F32), // Mismatching y dim
+ TensorInfo(TensorShape(23U, 27U, 4U, 3U), 1, DataType::F32), // Mismatching z dim
+ TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+ }),
+ framework::dataset::make("InputInfo2", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 3U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 27U, 3U, 6U), 1, DataType::F32)
+ })),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(23U, 27U, 5U, 4U), 1, DataType::F16),
+ TensorInfo(TensorShape(23U, 12U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 27U, 4U, 4U), 1, DataType::F32),
+ TensorInfo(TensorShape(23U, 20U, 4U, 3U), 1, DataType::F32),
+ TensorInfo(TensorShape(16U, 27U, 3U, 12U), 1, DataType::F32)
+ })),
+ framework::dataset::make("Expected", { false, false, false, false, true })),
+ input_info1, input_info2, output_info, expected)
+{
+ std::vector<TensorInfo> inputs_vector_info;
+ inputs_vector_info.emplace_back(std::move(input_info1));
+ inputs_vector_info.emplace_back(std::move(input_info2));
+
+ std::vector<ITensorInfo *> inputs_vector_info_raw;
+ inputs_vector_info_raw.reserve(inputs_vector_info.size());
+ for(auto &input : inputs_vector_info)
+ {
+ inputs_vector_info_raw.emplace_back(&input);
+ }
+
+ bool is_valid = bool(NEConcatenateLayer::validate(inputs_vector_info_raw, &output_info.clone()->set_is_resizable(false), 3));
+ ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using NEBatchConcatenateLayerFixture = ConcatenateLayerValidationFixture<Tensor, ITensor, Accessor, NEConcatenateLayer, T>;
+
+TEST_SUITE(Float)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchConcatenateLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small2DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchConcatenateLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::F16)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(), framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(concat(datasets::Small3DShapes(), datasets::Tiny4DShapes()),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEBatchConcatenateLayerFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::ConcatenateLayerShapes(),
+ framework::dataset::make("DataType",
+ DataType::QASYMM8)),
+ framework::dataset::make("Axis", 3)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/ConcatenateLayer.cpp b/tests/validation/reference/ConcatenateLayer.cpp
index 6c90d74a0f..aa74ca2474 100644
--- a/tests/validation/reference/ConcatenateLayer.cpp
+++ b/tests/validation/reference/ConcatenateLayer.cpp
@@ -127,6 +127,16 @@ SimpleTensor<T> concatenate_layer(std::vector<SimpleTensor<T>> &srcs, SimpleTens
dst = reference::permute<T>(dst, PermutationVector(2U, 1U, 0U));
return reference::permute<T>(widthconcatenate_layer(srcs, dst), PermutationVector(2U, 1U, 0U));
}
+ case 3:
+ {
+ for(auto &t : srcs)
+ {
+ t = reference::permute<T>(t, PermutationVector(3U, 2U, 1U, 0U));
+ }
+ dst = reference::permute<T>(dst, PermutationVector(3U, 2U, 1U, 0U));
+ auto ret = reference::permute<T>(widthconcatenate_layer(srcs, dst), PermutationVector(3U, 2U, 1U, 0U));
+ return ret;
+ }
default:
{
ARM_COMPUTE_ERROR("Not supported");
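
The new case 3 in the reference implementation reuses widthconcatenate_layer by reversing the dimension order of every input and of the output with PermutationVector(3U, 2U, 1U, 0U), concatenating, and reversing the result again; because a full reversal is its own inverse, the round trip restores the original axis order. The standalone check below illustrates that logical identity on plain row-major 4D arrays. It is a sketch only: it does not model Compute Library's internal memory layout, just the axis bookkeeping.

#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

using Shape = std::array<std::size_t, 4>;

// Flat row-major offset for a 4D tensor of shape s.
static std::size_t offset(const Shape &s, std::size_t a, std::size_t b, std::size_t c, std::size_t d)
{
    return ((a * s[1] + b) * s[2] + c) * s[3] + d;
}

// Apply the dimension reversal P = (3,2,1,0): out[d][c][b][a] = in[a][b][c][d].
static std::vector<float> reverse_dims(const std::vector<float> &in, const Shape &s, Shape &out_s)
{
    out_s = { s[3], s[2], s[1], s[0] };
    std::vector<float> out(in.size());
    for(std::size_t a = 0; a < s[0]; ++a)
        for(std::size_t b = 0; b < s[1]; ++b)
            for(std::size_t c = 0; c < s[2]; ++c)
                for(std::size_t d = 0; d < s[3]; ++d)
                    out[offset(out_s, d, c, b, a)] = in[offset(s, a, b, c, d)];
    return out;
}

// Concatenating along dimension 0 of row-major tensors is just appending the flat buffers.
static std::vector<float> concat_dim0(const std::vector<float> &x, const std::vector<float> &y)
{
    std::vector<float> out(x);
    out.insert(out.end(), y.begin(), y.end());
    return out;
}

int main()
{
    const Shape s{ 2, 3, 4, 5 }; // two inputs of identical shape
    std::vector<float> x(2 * 3 * 4 * 5), y(x.size());
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        x[i] = float(i);
        y[i] = float(i) + 1000.f;
    }

    // Direct concatenation along the last dimension.
    const Shape cat_s{ 2, 3, 4, 10 };
    std::vector<float> direct(2 * 3 * 4 * 10);
    for(std::size_t a = 0; a < 2; ++a)
        for(std::size_t b = 0; b < 3; ++b)
            for(std::size_t c = 0; c < 4; ++c)
                for(std::size_t d = 0; d < 10; ++d)
                    direct[offset(cat_s, a, b, c, d)] = (d < 5) ? x[offset(s, a, b, c, d)]
                                                                : y[offset(s, a, b, c, d - 5)];

    // Reverse dims, concatenate along dimension 0, reverse back.
    Shape rs{};
    const auto px = reverse_dims(x, s, rs); // rs = (5, 4, 3, 2)
    const auto py = reverse_dims(y, s, rs);
    const Shape cat_rs{ 10, 4, 3, 2 };
    Shape back_s{};
    const auto via_permute = reverse_dims(concat_dim0(px, py), cat_rs, back_s);

    assert(back_s == cat_s);
    assert(via_permute == direct);
    return 0;
}
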