author    Gian Marco <gianmarco.iodice@arm.com>    2017-10-18 17:05:02 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    fa4cacdff825a38eac31ef7ecd3ad6b30da53eaa (patch)
tree      15982731a9b70660f9b501f0c6bb259e2e4115ff
parent    13fc22c3c3f609489e53ec706026a2a7991bf367 (diff)
download  ComputeLibrary-fa4cacdff825a38eac31ef7ecd3ad6b30da53eaa.tar.gz
COMPMID-636 - Extending GEMMLowp validation for NEON intrinsics
Change-Id: Id02a0b3bf5af65dd940b46b2f6634b6a479cf388
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/92275
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--    tests/datasets/GEMMLowpDataset.h               147
-rw-r--r--    tests/datasets/LargeGEMMLowpDataset.h           54
-rw-r--r--    tests/datasets/SmallGEMMLowpDataset.h           54
-rw-r--r--    tests/validation/NEON/GEMMLowp.cpp              39
-rw-r--r--    tests/validation/fixtures/GEMMLowpFixture.h     25
5 files changed, 298 insertions, 21 deletions
diff --git a/tests/datasets/GEMMLowpDataset.h b/tests/datasets/GEMMLowpDataset.h
new file mode 100644
index 0000000000..4bf2a98d61
--- /dev/null
+++ b/tests/datasets/GEMMLowpDataset.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMLOWP_DATASET
+#define ARM_COMPUTE_TEST_GEMMLOWP_DATASET
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class GEMMLowpDataset
+{
+public:
+ using type = std::tuple<TensorShape, TensorShape, TensorShape, int32_t, int32_t, int32_t, int32_t, int32_t>;
+
+ struct iterator
+ {
+ iterator(std::vector<TensorShape>::const_iterator a_it,
+ std::vector<TensorShape>::const_iterator b_it,
+ std::vector<TensorShape>::const_iterator c_it,
+ std::vector<int32_t>::const_iterator a_offset_it,
+ std::vector<int32_t>::const_iterator b_offset_it,
+ std::vector<int32_t>::const_iterator c_offset_it,
+ std::vector<int32_t>::const_iterator c_mult_int_it,
+ std::vector<int32_t>::const_iterator out_shift_it)
+ : _a_it{ std::move(a_it) },
+ _b_it{ std::move(b_it) },
+ _c_it{ std::move(c_it) },
+ _a_offset_it{ std::move(a_offset_it) },
+ _b_offset_it{ std::move(b_offset_it) },
+ _c_offset_it{ std::move(c_offset_it) },
+ _c_mult_int_it{ std::move(c_mult_int_it) },
+ _out_shift_it{ std::move(out_shift_it) }
+ {
+ }
+
+ std::string description() const
+ {
+ std::stringstream description;
+ description << "A=" << *_a_it << ":";
+ description << "B=" << *_b_it << ":";
+ description << "C=" << *_c_it << ":";
+ description << "a_offset=" << *_a_offset_it << ":";
+ description << "b_offset=" << *_b_offset_it << ":";
+ description << "c_offset=" << *_c_offset_it << ":";
+ description << "c_mult_int=" << *_c_mult_int_it << ":";
+ description << "out_shift=" << *_out_shift_it << ":";
+ return description.str();
+ }
+
+ GEMMLowpDataset::type operator*() const
+ {
+ return std::make_tuple(*_a_it, *_b_it, *_c_it, *_a_offset_it, *_b_offset_it, *_c_offset_it, *_c_mult_int_it, *_out_shift_it);
+ }
+
+ iterator &operator++()
+ {
+ ++_a_it;
+ ++_b_it;
+ ++_c_it;
+ ++_a_offset_it;
+ ++_b_offset_it;
+ ++_c_offset_it;
+ ++_c_mult_int_it;
+ ++_out_shift_it;
+
+ return *this;
+ }
+
+ private:
+ std::vector<TensorShape>::const_iterator _a_it;
+ std::vector<TensorShape>::const_iterator _b_it;
+ std::vector<TensorShape>::const_iterator _c_it;
+ std::vector<int32_t>::const_iterator _a_offset_it;
+ std::vector<int32_t>::const_iterator _b_offset_it;
+ std::vector<int32_t>::const_iterator _c_offset_it;
+ std::vector<int32_t>::const_iterator _c_mult_int_it;
+ std::vector<int32_t>::const_iterator _out_shift_it;
+ };
+
+ iterator begin() const
+ {
+ return iterator(_a_shapes.begin(), _b_shapes.begin(), _c_shapes.begin(), _a_offset.begin(), _b_offset.begin(), _c_offset.begin(), _c_mult_int.begin(), _out_shift.begin());
+ }
+
+ int size() const
+ {
+ return std::min(_a_shapes.size(), std::min(_b_shapes.size(), std::min(_c_shapes.size(), std::min(_a_offset.size(), std::min(_b_offset.size(), std::min(_c_offset.size(), std::min(_c_mult_int.size(),
+ _out_shift.size())))))));
+ }
+
+ void add_config(TensorShape a, TensorShape b, TensorShape c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ {
+ _a_shapes.emplace_back(std::move(a));
+ _b_shapes.emplace_back(std::move(b));
+ _c_shapes.emplace_back(std::move(c));
+ _a_offset.emplace_back(std::move(a_offset));
+ _b_offset.emplace_back(std::move(b_offset));
+ _c_offset.emplace_back(std::move(c_offset));
+ _c_mult_int.emplace_back(std::move(c_mult_int));
+ _out_shift.emplace_back(std::move(out_shift));
+ }
+
+protected:
+ GEMMLowpDataset() = default;
+ GEMMLowpDataset(GEMMLowpDataset &&) = default;
+
+private:
+ std::vector<TensorShape> _a_shapes{};
+ std::vector<TensorShape> _b_shapes{};
+ std::vector<TensorShape> _c_shapes{};
+ std::vector<int32_t> _a_offset{};
+ std::vector<int32_t> _b_offset{};
+ std::vector<int32_t> _c_offset{};
+ std::vector<int32_t> _c_mult_int{};
+ std::vector<int32_t> _out_shift{};
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMLOWP_DATASET */
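
Note (not part of the patch): GEMMLowpDataset is a plain container of per-test configurations; each entry bundles the A/B/C tensor shapes with the quantization parameters (a_offset, b_offset, c_offset, c_mult_int, out_shift). A minimal sketch of walking a derived dataset by hand, assuming the test headers from this patch are on the include path; the validation framework normally drives this through DATA_TEST_CASE / FIXTURE_DATA_TEST_CASE:

    /*
     * Illustrative usage sketch only: iterate the new dataset manually
     * and print each configuration.
     */
    #include "tests/datasets/SmallGEMMLowpDataset.h"

    #include <iostream>

    int main()
    {
        arm_compute::test::datasets::SmallGEMMLowpDataset dataset;

        auto config = dataset.begin();
        for(int i = 0; i < dataset.size(); ++i, ++config)
        {
            // description() concatenates the A/B/C shapes and the
            // quantization parameters of the current entry.
            std::cout << config.description() << std::endl;
        }
        return 0;
    }
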
diff --git a/tests/datasets/LargeGEMMLowpDataset.h b/tests/datasets/LargeGEMMLowpDataset.h
new file mode 100644
index 0000000000..610a999dd8
--- /dev/null
+++ b/tests/datasets/LargeGEMMLowpDataset.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_LARGE_GEMMLOWP_DATASET
+#define ARM_COMPUTE_TEST_LARGE_GEMMLOWP_DATASET
+
+#include "tests/datasets/GEMMLowpDataset.h"
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class LargeGEMMLowpDataset final : public GEMMLowpDataset
+{
+public:
+ LargeGEMMLowpDataset()
+ {
+ add_config(TensorShape(923U, 429U), TensorShape(871U, 923U), TensorShape(871U, 429U), 0, 0, 0, 1, 0);
+ add_config(TensorShape(1021U, 973U), TensorShape(783U, 1021U), TensorShape(783U, 973U), 5, 13, -6, 2, 2);
+ add_config(TensorShape(681U, 1023U), TensorShape(213U, 681U), TensorShape(213U, 1023U), -3, -2, 8, 4, 3);
+ add_config(TensorShape(941U, 1011U), TensorShape(623U, 941U), TensorShape(623U, 1011U), -9, 1, -3, 3, 1);
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_LARGE_GEMMLOWP_DATASET */
diff --git a/tests/datasets/SmallGEMMLowpDataset.h b/tests/datasets/SmallGEMMLowpDataset.h
new file mode 100644
index 0000000000..2ed7a759ea
--- /dev/null
+++ b/tests/datasets/SmallGEMMLowpDataset.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_SMALL_GEMMLOWP_DATASET
+#define ARM_COMPUTE_TEST_SMALL_GEMMLOWP_DATASET
+
+#include "tests/datasets/GEMMLowpDataset.h"
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class SmallGEMMLowpDataset final : public GEMMLowpDataset
+{
+public:
+ SmallGEMMLowpDataset()
+ {
+ add_config(TensorShape(21U, 13U), TensorShape(33U, 21U), TensorShape(33U, 13U), 0, 0, 0, 1, 0);
+ add_config(TensorShape(31U, 27U), TensorShape(23U, 31U), TensorShape(23U, 27U), 5, 13, -6, 2, 2);
+ add_config(TensorShape(38U, 12U), TensorShape(21U, 38U), TensorShape(21U, 12U), -3, -2, 8, 4, 3);
+ add_config(TensorShape(32U, 72U), TensorShape(17U, 32U), TensorShape(17U, 72U), -9, 1, -3, 3, 1);
+ }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_SMALL_GEMMLOWP_DATASET */
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 6a972eb5fc..dede3aef29 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -26,6 +26,8 @@
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
+#include "tests/datasets/LargeGEMMLowpDataset.h"
+#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
@@ -44,8 +46,6 @@ namespace validation
{
namespace
{
-constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-
const auto data_mnk = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 12, 15);
const auto data_offsets = framework::dataset::make("a", -3, 3) * framework::dataset::make("b", -1, 2) * framework::dataset::make("c", 1, 3) * framework::dataset::make("cm", 0,
3)
@@ -69,7 +69,7 @@ using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedValidationFixture<Te
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk)
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
@@ -79,16 +79,41 @@ using NEGEMMInterleaveBlockedTransposedFixture = GEMMInterleaveBlockedValidation
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedTransposedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk_tr)
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture<Tensor, Accessor, NEGEMMLowp>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::PRECOMMIT, data_mnk *data_offsets)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()), framework::dataset::make("DataType",
+ DataType::U8)),
+ shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type)
+{
+ // Create tensors
+ Tensor a = create_tensor<Tensor>(shape_a, data_type);
+ Tensor b = create_tensor<Tensor>(shape_b, data_type);
+ Tensor c = create_tensor<Tensor>(shape_c, data_type);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NEGEMMLowp gemmlowp;
+ gemmlowp.configure(&a, &b, &c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpDataset(), framework::dataset::make("DataType", DataType::U8)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpOffsetFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpDataset(), framework::dataset::make("DataType", DataType::U8)))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
@@ -100,7 +125,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::PREC
16))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
#endif // defined(__aarch64__)
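
Note (not part of the patch): the fixture below now receives the three tensor shapes and the data type straight from the dataset entries instead of deriving them from M/N/K. For orientation only, a rough scalar sketch of the low-precision GEMM these parameters describe follows; it assumes the usual uint8 offset/multiplier/shift output stage and is not the library's reference implementation.

    /*
     * Scalar sketch (assumption: mirrors the uint8 GEMMLowp semantics
     * exercised by these tests). Row-major A (M x K), B (K x N), C (M x N).
     */
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    void gemmlowp_u8_sketch(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b, std::vector<uint8_t> &c,
                            int m, int n, int k,
                            int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
    {
        for(int row = 0; row < m; ++row)
        {
            for(int col = 0; col < n; ++col)
            {
                // Accumulate in 32 bits with the input offsets applied.
                int32_t acc = 0;
                for(int depth = 0; depth < k; ++depth)
                {
                    acc += (static_cast<int32_t>(a[row * k + depth]) + a_offset) *
                           (static_cast<int32_t>(b[depth * n + col]) + b_offset);
                }
                // Output stage: add offset, multiply, shift, clamp back to uint8.
                acc = ((acc + c_offset) * c_mult_int) >> out_shift;
                c[row * n + col] = static_cast<uint8_t>(std::min(255, std::max(0, acc)));
            }
        }
    }
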
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 556b6c4725..1a4084fdc2 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -47,13 +47,10 @@ class GEMMLowpOffsetValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(size_t m, size_t n, size_t k, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
- const TensorShape shape_a(k, m);
- const TensorShape shape_b(n, k);
- const TensorShape shape_c(n, m);
- _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
- _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+ _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
+ _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
}
protected:
@@ -66,12 +63,12 @@ protected:
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
- TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
- TensorType c = create_tensor<TensorType>(shape_c, DataType::U8, 1);
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
+ TensorType c = create_tensor<TensorType>(shape_c, data_type, 1);
// Create and configure function
FunctionType gemmlowp;
@@ -101,12 +98,12 @@ protected:
}
SimpleTensor<uint8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
// Create reference
- SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
- SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
- SimpleTensor<uint8_t> c{ shape_c, DataType::U8, 1 };
+ SimpleTensor<uint8_t> a{ shape_a, data_type, 1 };
+ SimpleTensor<uint8_t> b{ shape_b, data_type, 1 };
+ SimpleTensor<uint8_t> c{ shape_c, data_type, 1 };
// Fill reference
fill(a, 0);