about summary refs log tree commit diff
path: root/tests/validation
diff options
context:
space:
mode:
authorGian Marco <gianmarco.iodice@arm.com>2017-10-18 17:05:02 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:35:24 +0000
commitfa4cacdff825a38eac31ef7ecd3ad6b30da53eaa (patch)
tree15982731a9b70660f9b501f0c6bb259e2e4115ff /tests/validation
parent13fc22c3c3f609489e53ec706026a2a7991bf367 (diff)
downloadComputeLibrary-fa4cacdff825a38eac31ef7ecd3ad6b30da53eaa.tar.gz
COMPMID-636 - Extending GEMMLowp validation for NEON intrinsics
Change-Id: Id02a0b3bf5af65dd940b46b2f6634b6a479cf388
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/92275
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r-- tests/validation/NEON/GEMMLowp.cpp           | 39
-rw-r--r-- tests/validation/fixtures/GEMMLowpFixture.h  | 25
2 files changed, 43 insertions(+), 21 deletions(-)
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 6a972eb5fc..dede3aef29 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -26,6 +26,8 @@
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
+#include "tests/datasets/LargeGEMMLowpDataset.h"
+#include "tests/datasets/SmallGEMMLowpDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
@@ -44,8 +46,6 @@ namespace validation
{
namespace
{
-constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-
const auto data_mnk = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 12, 15);
const auto data_offsets = framework::dataset::make("a", -3, 3) * framework::dataset::make("b", -1, 2) * framework::dataset::make("c", 1, 3) * framework::dataset::make("cm", 0,
3)
@@ -69,7 +69,7 @@ using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedValidationFixture<Te
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk)
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
@@ -79,16 +79,41 @@ using NEGEMMInterleaveBlockedTransposedFixture = GEMMInterleaveBlockedValidation
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedTransposedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk_tr)
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture<Tensor, Accessor, NEGEMMLowp>;
-FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::PRECOMMIT, data_mnk *data_offsets)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallGEMMLowpDataset(), datasets::LargeGEMMLowpDataset()), framework::dataset::make("DataType",
+ DataType::U8)),
+ shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type)
+{
+ // Create tensors
+ Tensor a = create_tensor<Tensor>(shape_a, data_type);
+ Tensor b = create_tensor<Tensor>(shape_b, data_type);
+ Tensor c = create_tensor<Tensor>(shape_c, data_type);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Create and configure function
+ NEGEMMLowp gemmlowp;
+ gemmlowp.configure(&a, &b, &c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::ALL, combine(datasets::SmallGEMMLowpDataset(), framework::dataset::make("DataType", DataType::U8)))
+{
+ // Validate output
+ validate(Accessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, NEGEMMLowpOffsetFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeGEMMLowpDataset(), framework::dataset::make("DataType", DataType::U8)))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
@@ -100,7 +125,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::PREC
16))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_f);
+ validate(Accessor(_target), _reference);
}
TEST_SUITE_END()
#endif // defined(__aarch64__)
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 556b6c4725..1a4084fdc2 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -47,13 +47,10 @@ class GEMMLowpOffsetValidationFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(size_t m, size_t n, size_t k, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
- const TensorShape shape_a(k, m);
- const TensorShape shape_b(n, k);
- const TensorShape shape_c(n, m);
- _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
- _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift);
+ _target = compute_target(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
+ _reference = compute_reference(shape_a, shape_b, shape_c, a_offset, b_offset, c_offset, c_mult_int, out_shift, data_type);
}
protected:
@@ -66,12 +63,12 @@ protected:
}
TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
// Create tensors
- TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
- TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
- TensorType c = create_tensor<TensorType>(shape_c, DataType::U8, 1);
+ TensorType a = create_tensor<TensorType>(shape_a, data_type, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, data_type, 1);
+ TensorType c = create_tensor<TensorType>(shape_c, data_type, 1);
// Create and configure function
FunctionType gemmlowp;
@@ -101,12 +98,12 @@ protected:
}
SimpleTensor<uint8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c,
- int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
+ int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift, DataType data_type)
{
// Create reference
- SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
- SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
- SimpleTensor<uint8_t> c{ shape_c, DataType::U8, 1 };
+ SimpleTensor<uint8_t> a{ shape_a, data_type, 1 };
+ SimpleTensor<uint8_t> b{ shape_b, data_type, 1 };
+ SimpleTensor<uint8_t> c{ shape_c, data_type, 1 };
// Fill reference
fill(a, 0);