author    Pablo Tello <pablo.tello@arm.com>    2017-09-29 16:43:25 +0100
committer Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:35:24 +0000
commit    bf2fb95c99ebd215b3c0d93cb970461185ef9716 (patch)
tree      ef9ea161a5b4bf04d057681eb435605f3d1fa5ab /tests
parent    dd715f2a88827241a3fb9e4a2d8be82455f649f7 (diff)
COMPMID-481: Add gemmlowp_aarch64_v8p4 kernel.
Change-Id: I15496b16ffd636f5bff76572e750df7e15c80830
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90532
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/NEON/Helper.h                                       |  16
-rw-r--r--  tests/benchmark/NEON/GEMMLowp.cpp                         |  65
-rw-r--r--  tests/benchmark/fixtures/GEMMLowpFixture.h                | 125
-rw-r--r--  tests/validation/CPP/GEMMInterleaveBlocked.h              |  82
-rw-r--r--  tests/validation/CPP/GEMMLowp.cpp                         |  36
-rw-r--r--  tests/validation/CPP/GEMMLowp.h                           |   2
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp                        |  44
-rw-r--r--  tests/validation/fixtures/GEMMInterleaveBlockedFixture.h  | 114
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h               |  75
9 files changed, 558 insertions(+), 1 deletion(-)
diff --git a/tests/NEON/Helper.h b/tests/NEON/Helper.h
index 4efab17fca..8bd11cc57b 100644
--- a/tests/NEON/Helper.h
+++ b/tests/NEON/Helper.h
@@ -25,6 +25,8 @@
#define __ARM_COMPUTE_TEST_NEON_HELPER_H__
#include "arm_compute/runtime/Array.h"
+#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "support/ToolchainSupport.h"
#include "tests/Globals.h"
#include <algorithm>
@@ -48,6 +50,20 @@ void fill_tensors(D &&dist, std::initializer_list<int> seeds, T &&tensor, Ts &&.
}
}
+// This template synthesizes an INESimpleFunction which runs the given kernel K
+template <typename K>
+class NESynthetizeFunction : public INESimpleFunction
+{
+public:
+ template <typename... Args>
+ void configure(Args &&... args)
+ {
+ auto k = arm_compute::support::cpp14::make_unique<K>();
+ k->configure(std::forward<Args>(args)...);
+ _kernel = std::move(k);
+ }
+};
+
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_NEON_HELPER_H__ */
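For context, this helper is consumed later in the patch by aliasing it over a concrete kernel. A minimal sketch of the pattern (the tensor setup is an illustrative assumption, not part of this diff):

#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
#include "tests/NEON/Helper.h"

using namespace arm_compute;
using namespace arm_compute::test;

// Wrap the bare kernel so it can be configured and run like a function.
using NEInterleaveBlocked = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;

void run_interleave(Tensor &a, Tensor &b)
{
    NEInterleaveBlocked f;
    // Arguments are forwarded verbatim to the kernel's configure():
    // (input, output, interleave-by, block, transposed)
    f.configure(&a, &b, 8, 4, false);
    f.run(); // INESimpleFunction::run() dispatches the stored kernel
}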
diff --git a/tests/benchmark/NEON/GEMMLowp.cpp b/tests/benchmark/NEON/GEMMLowp.cpp
new file mode 100644
index 0000000000..8cf143393d
--- /dev/null
+++ b/tests/benchmark/NEON/GEMMLowp.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/benchmark/fixtures/GEMMLowpFixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "tests/NEON/Helper.h"
+
+namespace arm_compute
+{
+namespace test
+{
+const auto data_int_blk = framework::dataset::make("M", 800) * framework::dataset::make("N", 800) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+
+TEST_SUITE(NEON)
+
+TEST_SUITE(INTERLEAVE_BLOCKED)
+using NEInterleaveBlocked = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedFixture<Tensor, NEInterleaveBlocked, Accessor>;
+REGISTER_FIXTURE_DATA_TEST_CASE(InterleaveBlocked, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::ALL, data_int_blk);
+TEST_SUITE_END()
+
+#if 0 //FIXME: enable when we update NEGEMMLowp interface to work without offsets
+TEST_SUITE(U32)
+using NEGEMMLowpFixture = GEMMLowpFixture<Tensor, NEGEMMLowp, Accessor>;
+REGISTER_FIXTURE_DATA_TEST_CASE(GEMMLowp, NEGEMMLowpFixture, framework::DatasetMode::ALL, framework::dataset::make("M", 100, 120) * framework::dataset::make("N", 100,
+ 110)
+ * framework::dataset::make("K", 16, 20));
+
+TEST_SUITE_END()
+#endif // 0 - FIXME: enable when we update NEGEMMLowp interface to work without offsets
+
+TEST_SUITE_END()
+
+} // namespace test
+} // namespace arm_compute
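For reference, framework::dataset::make("Name", a, b) is used throughout these tests as a half-open integer range [a, b), a single argument makes a one-element set, and operator* forms the Cartesian product (an assumption about the framework semantics, consistent with how the datasets are commented elsewhere in this patch). Under that reading, data_int_blk above expands to:

// M      N      by    block
// 800    800     8      4
// 800    800     8      5
// ...    (every by/block pair)
// 800    800    12      8     -> 1 * 1 * 5 * 5 = 25 benchmark cases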
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
new file mode 100644
index 0000000000..b640705990
--- /dev/null
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMLOWPFIXTURE
+#define ARM_COMPUTE_TEST_GEMMLOWPFIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/Globals.h"
+#include "tests/Utils.h"
+#include "tests/framework/Fixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+template <typename TensorType, typename Function, typename Accessor, bool Transposed = false>
+class GEMMInterleaveBlockedFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(size_t x, size_t y, int int_by, int block)
+ {
+ const float interleave_by_f32 = int_by;
+ const TensorShape shape_a(x, y);
+ const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
+ // Create tensors
+ a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+ b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+
+ // Create and configure function
+ f.configure(&a, &b, int_by, block, Transposed);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ }
+ void run()
+ {
+ f.run();
+ }
+
+ void teardown()
+ {
+ a.allocator()->free();
+ b.allocator()->free();
+ }
+
+private:
+ TensorType a{};
+ TensorType b{};
+ Function f{};
+};
+
+/** Fixture that can be used for NEON and CL */
+template <typename TensorType, typename Function, typename Accessor>
+class GEMMLowpFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(size_t m, size_t n, size_t k)
+ {
+ const TensorShape shape_a(k, m);
+ const TensorShape shape_b(n, k);
+ const TensorShape shape_c(n, m);
+ // Create tensors
+ a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+ b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+ c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+
+ // Create and configure function
+ gemmlowp.configure(&a, &b, &c);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ c.allocator()->allocate();
+
+ // Fill tensors
+ library->fill_tensor_uniform(Accessor(a), 0);
+ library->fill_tensor_uniform(Accessor(b), 1);
+ library->fill_tensor_uniform(Accessor(c), 2);
+ }
+ void run()
+ {
+ gemmlowp.run();
+ }
+
+ void teardown()
+ {
+ a.allocator()->free();
+ b.allocator()->free();
+ c.allocator()->free();
+ }
+
+private:
+ TensorType a{};
+ TensorType b{};
+ TensorType c{};
+ Function gemmlowp{};
+};
+
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMLOWPFIXTURE */
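Both fixtures follow the benchmark framework's setup/run/teardown contract; a sketch of how the framework is expected to drive them (the driver loop itself is an assumption, only the fixture interface is defined in this file):

// Hypothetical driver, assuming the framework calls setup() once per
// dataset combination, run() once per measured iteration, and
// teardown() when the case finishes.
GEMMLowpFixture<Tensor, NEGEMMLowp, Accessor> fixture;
fixture.setup(100, 100, 16);       // M, N, K taken from the dataset
for(int i = 0; i < iterations; ++i)
{
    fixture.run();                 // only gemmlowp.run() is timed
}
fixture.teardown();                // frees the tensors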
diff --git a/tests/validation/CPP/GEMMInterleaveBlocked.h b/tests/validation/CPP/GEMMInterleaveBlocked.h
new file mode 100644
index 0000000000..ff5a0d647c
--- /dev/null
+++ b/tests/validation/CPP/GEMMInterleaveBlocked.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "GEMM.h"
+
+#include "arm_compute/core/Types.h"
+#include "tests/validation/FixedPoint.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+T safe_read(const SimpleTensor<T> &t, int y, int x)
+{
+ const int stride = t.shape().x();
+ const int M = t.shape().y();
+ const int N = t.shape().x();
+ if((y < M) && (x < N))
+ {
+ return t[y * stride + x];
+ }
+ return 0;
+}
+
+template <typename T>
+SimpleTensor<T> gemm_interleave_blocked(const SimpleTensor<T> &in, SimpleTensor<T> &out, int int_by, int block, bool transposed)
+{
+ const int M = out.shape().y();
+ const int N = out.shape().x();
+ for(int y = 0; y < M; y++)
+ {
+ T *out_ptr = &out[y * N];
+ for(int x = 0; x < (N / int_by); x += block)
+ {
+ for(int z = 0; z < int_by; z++)
+ {
+ for(int a = 0; (out_ptr <= &out[y * N + (N - 1)]) && a < block; a++)
+ {
+ if(!transposed)
+ *out_ptr++ = safe_read(in, (y * int_by) + z, x + a);
+ else
+ {
+ const T value = safe_read(in, x + a, (y * int_by) + z);
+ *out_ptr++ = value;
+ }
+ }
+ }
+ }
+ }
+ return out;
+}
+
+template SimpleTensor<uint8_t> gemm_interleave_blocked(const SimpleTensor<uint8_t> &in, SimpleTensor<uint8_t> &out, int int_by, int block, bool transposed);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_H__ */
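A worked example of the layout the loops above produce (values illustrative): for a 4x4 input with int_by = 4 and block = 2, the output is a single row of 16 elements.

// Input rows r0..r3, columns 0..3:
//
//   out row 0 = r0[0] r0[1] r1[0] r1[1] r2[0] r2[1] r3[0] r3[1]   (x = 0)
//               r0[2] r0[3] r1[2] r1[3] r2[2] r2[3] r3[2] r3[3]   (x = 2)
//
// i.e. int_by rows are interleaved `block` elements at a time. safe_read()
// zero-pads whenever (y * int_by + z) or (x + a) falls outside the input,
// which is why the fixtures round the output height up with std::ceil.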
diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp
index d172a773b6..06926e631e 100644
--- a/tests/validation/CPP/GEMMLowp.cpp
+++ b/tests/validation/CPP/GEMMLowp.cpp
@@ -34,6 +34,42 @@ namespace validation
{
namespace reference
{
+SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c)
+{
+ const int K = a.shape().x();
+ const int b_width = b.shape().x();
+ const int rows = c.shape().y(); //M
+ const int cols = c.shape().x(); //N
+ std::vector<int32_t> acc;
+ acc.resize(cols);
+ for(int i = 0; i < rows; ++i)
+ {
+ for(int j = 0; j < cols; ++j)
+ {
+ acc[j] = 0;
+ }
+ for(int k = 0; k < K; ++k)
+ {
+ auto tmp_a = static_cast<int32_t>(a[k + i * K]);
+ for(int j = 0; j < b_width; ++j)
+ {
+ auto tmp_b = static_cast<int32_t>(b[j + k * b_width]);
+ const int32_t mult_as_int = tmp_a * tmp_b;
+ acc[j] += mult_as_int;
+ }
+ }
+ for(int j = 0; j < cols; ++j)
+ {
+ c[j + i * cols] = acc[j];
+ }
+ }
+
+ return c;
+}
+
template <typename T>
SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
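The new overload above is a plain, offset-free GEMM with uint8 inputs and uint32 accumulation, i.e. c[i][j] = sum over k of a[i][k] * b[k][j]. A worked 2x2 example under that reading:

// A = | 1 2 |    B = | 5 6 |        (both uint8, K = 2)
//     | 3 4 |        | 7 8 |
//
// c[0][0] = 1*5 + 2*7 = 19     c[0][1] = 1*6 + 2*8 = 22
// c[1][0] = 3*5 + 4*7 = 43     c[1][1] = 3*6 + 4*8 = 50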
diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h
index 216097562e..0428e9e34f 100644
--- a/tests/validation/CPP/GEMMLowp.h
+++ b/tests/validation/CPP/GEMMLowp.h
@@ -35,6 +35,8 @@ namespace validation
{
namespace reference
{
+SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c);
+
template <typename T>
SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 3d83f8046f..045d334896 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -30,8 +30,12 @@
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/GEMMInterleaveBlockedFixture.h"
#include "tests/validation/fixtures/GEMMLowpFixture.h"
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "tests/NEON/Helper.h"
+
namespace arm_compute
{
namespace test
@@ -42,17 +46,44 @@ namespace
{
constexpr AbsoluteTolerance<float> tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
-const auto data_mnk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("K", 8, 12);
+const auto data_mnk = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 12, 15);
const auto data_offsets = framework::dataset::make("a", -3, 3) * framework::dataset::make("b", -1, 2) * framework::dataset::make("c", 1, 3) * framework::dataset::make("cm", 0,
3)
* framework::dataset::make("shift", 0, 4);
+const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+
+const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4);
+
} // namespace
TEST_SUITE(NEON)
TEST_SUITE(GEMMLowp)
TEST_SUITE(U8)
+
+TEST_SUITE(INTERLEAVE_BLOCKED)
+
+using NEInterleaveBlocked = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedValidationFixture<Tensor, Accessor, NEInterleaveBlocked>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk)
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(INTERLEAVE_BLOCKED_TRANSPOSED)
+using NEInterleaveBlockedTransposed = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedTransposedFixture = GEMMInterleaveBlockedValidationFixture<Tensor, Accessor, NEInterleaveBlockedTransposed, true>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedTransposedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk_tr)
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f);
+}
+
+TEST_SUITE_END()
+
using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture<Tensor, Accessor, NEGEMMLowp>;
FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::PRECOMMIT, data_mnk *data_offsets)
{
@@ -61,6 +92,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode
}
TEST_SUITE_END()
+#if defined(__aarch64__)
+TEST_SUITE(U32)
+using NEGEMMLowpFixture = GEMMLowpValidationFixture<Tensor, Accessor, NEGEMMLowp>;
+FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::PRECOMMIT, framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K",
+ 16))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f);
+}
+TEST_SUITE_END()
+#endif // defined(__aarch64__)
TEST_SUITE_END()
TEST_SUITE_END()
} // namespace validation
diff --git a/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h b/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h
new file mode 100644
index 0000000000..89c188f6a6
--- /dev/null
+++ b/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE
+#define ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/CPP/GEMMInterleaveBlocked.h"
+#include "tests/validation/Helpers.h"
+
+#include <cmath>
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, bool Transposed = false>
+class GEMMInterleaveBlockedValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(size_t x, size_t y, int int_by, int block)
+ {
+ const float interleave_by_f32 = int_by;
+ const TensorShape shape_a(x, y);
+ const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
+ _target = compute_target(shape_a, shape_b, int_by, block);
+ _reference = compute_reference(shape_a, shape_b, int_by, block);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i)
+ {
+ ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::U8);
+ std::uniform_int_distribution<> distribution(0, 255);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, int int_by, int block)
+ {
+ // Create tensors
+ TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+
+ // Create and configure function
+ FunctionType f;
+ f.configure(&a, &b, int_by, block, Transposed);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill the input tensor only: b is written by the kernel
+ fill(AccessorType(a), 0);
+
+ // Compute the blocked-interleave function
+ f.run();
+ return b;
+ }
+
+ SimpleTensor<uint8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, int int_by, int block)
+ {
+ // Create reference
+ SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
+ SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
+
+ // Fill reference
+ fill(a, 0);
+ return reference::gemm_interleave_blocked<uint8_t>(a, b, int_by, block, Transposed);
+ }
+
+ TensorType _target{};
+ SimpleTensor<uint8_t> _reference{};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE */
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index c972469e59..556b6c4725 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -120,6 +120,81 @@ protected:
SimpleTensor<uint8_t> _reference{};
};
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class GEMMLowpValidationFixture : public framework::Fixture
+{
+public:
+ template <typename...>
+ void setup(size_t m, size_t n, size_t k)
+ {
+ const TensorShape shape_a(k, m);
+ const TensorShape shape_b(n, k);
+ const TensorShape shape_c(n, m);
+ _target = compute_target(shape_a, shape_b, shape_c);
+ _reference = compute_reference(shape_a, shape_b, shape_c);
+ }
+
+protected:
+ template <typename U>
+ void fill(U &&tensor, int i, int lo, int hi)
+ {
+ std::uniform_int_distribution<> distribution(lo, hi);
+ library->fill(tensor, distribution, i);
+ }
+
+ TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+ {
+ // Create tensors
+ TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+ TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+ TensorType c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+
+ // Create and configure function
+ FunctionType gemmlowp;
+ gemmlowp.configure(&a, &b, &c);
+
+ ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Allocate tensors
+ a.allocator()->allocate();
+ b.allocator()->allocate();
+ c.allocator()->allocate();
+
+ ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+ // Fill tensors
+ fill(AccessorType(a), 0, 0, 3);
+ fill(AccessorType(b), 1, 0, 3);
+ fill(AccessorType(c), 2, 0, 0);
+
+ // Compute GEMM function
+ gemmlowp.run();
+ return c;
+ }
+
+ SimpleTensor<uint32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+ {
+ // Create reference
+ SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
+ SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
+ SimpleTensor<uint32_t> c{ shape_c, DataType::U32, 1 };
+
+ // Fill reference
+ fill(a, 0, 0, 3);
+ fill(b, 1, 0, 3);
+ fill(c, 2, 0, 0);
+
+ return reference::gemmlowp(a, b, c);
+ }
+
+ TensorType _target{};
+ SimpleTensor<uint32_t> _reference{};
+};
+
} // namespace validation
} // namespace test
} // namespace arm_compute