From bf2fb95c99ebd215b3c0d93cb970461185ef9716 Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Fri, 29 Sep 2017 16:43:25 +0100
Subject: COMPMID-481: Add gemmlowp_aarch64_v8p4 kernel.

Change-Id: I15496b16ffd636f5bff76572e750df7e15c80830
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/90532
Reviewed-by: Anthony Barbier
Tested-by: Kaizen
---
 tests/NEON/Helper.h                           |  16 +++
 tests/benchmark/NEON/GEMMLowp.cpp             |  65 +++++++++++
 tests/benchmark/fixtures/GEMMLowpFixture.h    | 125 +++++++++++++++++++++
 tests/validation/CPP/GEMMInterleaveBlocked.h  |  82 ++++++++++++++
 tests/validation/CPP/GEMMLowp.cpp             |  36 ++++++
 tests/validation/CPP/GEMMLowp.h               |   2 +
 tests/validation/NEON/GEMMLowp.cpp            |  44 +++++++-
 .../fixtures/GEMMInterleaveBlockedFixture.h   | 114 +++++++++++++++++++
 tests/validation/fixtures/GEMMLowpFixture.h   |  75 +++++++++++++
 9 files changed, 558 insertions(+), 1 deletion(-)
 create mode 100644 tests/benchmark/NEON/GEMMLowp.cpp
 create mode 100644 tests/benchmark/fixtures/GEMMLowpFixture.h
 create mode 100644 tests/validation/CPP/GEMMInterleaveBlocked.h
 create mode 100644 tests/validation/fixtures/GEMMInterleaveBlockedFixture.h

diff --git a/tests/NEON/Helper.h b/tests/NEON/Helper.h
index 4efab17fca..8bd11cc57b 100644
--- a/tests/NEON/Helper.h
+++ b/tests/NEON/Helper.h
@@ -25,6 +25,8 @@
 #define __ARM_COMPUTE_TEST_NEON_HELPER_H__
 
 #include "arm_compute/runtime/Array.h"
+#include "arm_compute/runtime/NEON/INESimpleFunction.h"
+#include "support/ToolchainSupport.h"
 #include "tests/Globals.h"
 
 #include
@@ -48,6 +50,20 @@ void fill_tensors(D &&dist, std::initializer_list<int> seeds, T &&tensor, Ts &&...
     }
 }
 
+// This template synthesizes an INESimpleFunction which runs the given kernel K
+template <typename K>
+class NESynthetizeFunction : public INESimpleFunction
+{
+public:
+    template <typename... Args>
+    void configure(Args &&... args)
+    {
+        auto k = arm_compute::support::cpp14::make_unique<K>();
+        k->configure(std::forward<Args>(args)...);
+        _kernel = std::move(k);
+    }
+};
+
 } // namespace test
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_TEST_NEON_HELPER_H__ */
diff --git a/tests/benchmark/NEON/GEMMLowp.cpp b/tests/benchmark/NEON/GEMMLowp.cpp
new file mode 100644
index 0000000000..8cf143393d
--- /dev/null
+++ b/tests/benchmark/NEON/GEMMLowp.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEGEMMLowp.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/benchmark/fixtures/GEMMLowpFixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h"
+#include "tests/NEON/Helper.h"
+
+namespace arm_compute
+{
+namespace test
+{
+const auto data_int_blk = framework::dataset::make("M", 800) * framework::dataset::make("N", 800) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9);
+
+TEST_SUITE(NEON)
+
+TEST_SUITE(INTERLEAVE_BLOCKED)
+using NEInterleaveBlocked            = NESynthetizeFunction<NEGEMMInterleaveBlockedKernel>;
+using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedFixture<Tensor, NEInterleaveBlocked>;
+REGISTER_FIXTURE_DATA_TEST_CASE(InterleaveBlocked, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::ALL, data_int_blk);
+TEST_SUITE_END()
+
+#if 0 //FIXME: enable when we update NEGEMMLowp interface to work without offsets
+TEST_SUITE(U32)
+using NEGEMMLowpFixture = GEMMLowpFixture<Tensor, NEGEMMLowp, Accessor>;
+REGISTER_FIXTURE_DATA_TEST_CASE(GEMMLowp, NEGEMMLowpFixture, framework::DatasetMode::ALL, framework::dataset::make("M", 100, 120) * framework::dataset::make("N", 100,
+                                                                                                                                                             110)
+                                * framework::dataset::make("K", 16, 20));
+
+TEST_SUITE_END()
+#endif //FIXME: enable when we update NEGEMMLowp interface to work without offsets
+
+TEST_SUITE_END()
+
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
new file mode 100644
index 0000000000..b640705990
--- /dev/null
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMMFIXTURE
+#define ARM_COMPUTE_TEST_GEMMFIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/Globals.h"
+#include "tests/Utils.h"
+#include "tests/framework/Fixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+template <typename TensorType, typename Function, bool Transposed = false>
+class GEMMInterleaveBlockedFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t x, size_t y, int int_by, int block)
+    {
+        const float       interleave_by_f32 = int_by;
+        const TensorShape shape_a(x, y);
+        const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
+        // Create tensors
+        a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+
+        // Create and configure function
+        f.configure(&a, &b, int_by, block, Transposed);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+    }
+    void run()
+    {
+        f.run();
+    }
+
+    void teardown()
+    {
+        a.allocator()->free();
+        b.allocator()->free();
+    }
+
+private:
+    TensorType a{};
+    TensorType b{};
+    Function   f{};
+};
+
+/** Fixture that can be used for NEON and CL */
+template <typename TensorType, typename Function, typename Accessor>
+class GEMMLowpFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t m, size_t n, size_t k)
+    {
+        const TensorShape shape_a(k, m);
+        const TensorShape shape_b(n, k);
+        const TensorShape shape_c(n, m);
+        // Create tensors
+        a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+        c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+
+        // Create and configure function
+        gemmlowp.configure(&a, &b, &c);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+        c.allocator()->allocate();
+
+        // Fill tensors
+        library->fill_tensor_uniform(Accessor(a), 0);
+        library->fill_tensor_uniform(Accessor(b), 1);
+        library->fill_tensor_uniform(Accessor(c), 2);
+    }
+    void run()
+    {
+        gemmlowp.run();
+    }
+
+    void teardown()
+    {
+        a.allocator()->free();
+        b.allocator()->free();
+        c.allocator()->free();
+    }
+
+private:
+    TensorType a{};
+    TensorType b{};
+    TensorType c{};
+    Function   gemmlowp{};
+};
+
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMMFIXTURE */
diff --git a/tests/validation/CPP/GEMMInterleaveBlocked.h b/tests/validation/CPP/GEMMInterleaveBlocked.h
new file mode 100644
index 0000000000..ff5a0d647c
--- /dev/null
+++ b/tests/validation/CPP/GEMMInterleaveBlocked.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "GEMM.h"
+
+#include "arm_compute/core/Types.h"
+#include "tests/validation/FixedPoint.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+T safe_read(const SimpleTensor<T> &t, int y, int x)
+{
+    const int stride = t.shape().x();
+    const int M      = t.shape().y();
+    const int N      = t.shape().x();
+    if((y < M) && (x < N))
+    {
+        return t[y * stride + x];
+    }
+    return 0;
+}
+
+template <typename T>
+SimpleTensor<T> gemm_interleave_blocked(const SimpleTensor<T> &in, SimpleTensor<T> &out, int int_by, int block, bool transposed)
+{
+    const int M = out.shape().y();
+    const int N = out.shape().x();
+    for(int y = 0; y < M; y++)
+    {
+        T *out_ptr = &out[y * N];
+        for(int x = 0; x < (N / int_by); x += block)
+        {
+            for(int z = 0; z < int_by; z++)
+            {
+                for(int a = 0; (out_ptr <= &out[y * N + (N - 1)]) && a < block; a++)
+                {
+                    if(!transposed)
+                        *out_ptr++ = safe_read(in, (y * int_by) + z, x + a);
+                    else
+                    {
+                        const T value = safe_read(in, x + a, (y * int_by) + z);
+                        *out_ptr++    = value;
+                    }
+                }
+            }
+        }
+    }
+    return out;
+}
+
+template SimpleTensor<uint8_t> gemm_interleave_blocked(const SimpleTensor<uint8_t> &in, SimpleTensor<uint8_t> &out, int int_by, int block, bool transposed);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/CPP/GEMMLowp.cpp b/tests/validation/CPP/GEMMLowp.cpp
index d172a773b6..06926e631e 100644
--- a/tests/validation/CPP/GEMMLowp.cpp
+++ b/tests/validation/CPP/GEMMLowp.cpp
@@ -34,6 +34,42 @@ namespace validation
 {
 namespace reference
 {
+SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c)
+{
+    ARM_COMPUTE_UNUSED(a);
+    ARM_COMPUTE_UNUSED(b);
+    ARM_COMPUTE_UNUSED(c);
+    const int K       = a.shape().x();
+    const int b_width = b.shape().x();
+    const int rows    = c.shape().y(); //M
+    const int cols    = c.shape().x(); //N
+    std::vector<int32_t> acc;
+    acc.resize(cols);
+    for(int i = 0; i < rows; ++i)
+    {
+        for(int j = 0; j < cols; ++j)
+        {
+            acc[j] = 0;
+        }
+        for(int k = 0; k < K; ++k)
+        {
+            auto tmp_a = static_cast<int32_t>(a[k + i * K]);
+            for(int j = 0; j < b_width; ++j)
+            {
+                auto          tmp_b       = static_cast<int32_t>(b[j + k * b_width]);
+                const int32_t mult_as_int = tmp_a * tmp_b;
+                acc[j] += mult_as_int;
+            }
+        }
+        for(int j = 0; j < cols; ++j)
+        {
+            c[j + i * cols] = acc[j];
+        }
+    }
+
+    return c;
+}
+
 template <typename T>
 SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
                          int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift)
diff --git a/tests/validation/CPP/GEMMLowp.h b/tests/validation/CPP/GEMMLowp.h
index 216097562e..0428e9e34f 100644
--- a/tests/validation/CPP/GEMMLowp.h
+++ b/tests/validation/CPP/GEMMLowp.h
@@ -35,6 +35,8 @@ namespace validation
 {
 namespace reference
 {
+SimpleTensor<uint32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, SimpleTensor<uint32_t> &c);
+
 template <typename T>
 SimpleTensor<T> gemmlowp(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &c,
                          int32_t a_offset, int32_t b_offset, int32_t c_offset, int32_t c_mult_int, int32_t out_shift);
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 3d83f8046f..045d334896 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -30,8 +30,12 @@
#include "tests/framework/Macros.h" #include "tests/framework/datasets/Datasets.h" #include "tests/validation/Validation.h" +#include "tests/validation/fixtures/GEMMInterleaveBlockedFixture.h" #include "tests/validation/fixtures/GEMMLowpFixture.h" +#include "arm_compute/core/NEON/kernels/NEGEMMInterleaveBlockedKernel.h" +#include "tests/NEON/Helper.h" + namespace arm_compute { namespace test @@ -42,17 +46,44 @@ namespace { constexpr AbsoluteTolerance tolerance_f(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */ -const auto data_mnk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("K", 8, 12); +const auto data_mnk = framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", 12, 15); const auto data_offsets = framework::dataset::make("a", -3, 3) * framework::dataset::make("b", -1, 2) * framework::dataset::make("c", 1, 3) * framework::dataset::make("cm", 0, 3) * framework::dataset::make("shift", 0, 4); +const auto data_int_blk = framework::dataset::make("M", 8, 12) * framework::dataset::make("N", 8, 12) * framework::dataset::make("by", 8, 13) * framework::dataset::make("block", 4, 9); + +const auto data_int_blk_tr = framework::dataset::make("M", 8, 17) * framework::dataset::make("N", 8, 14) * framework::dataset::make("by", 12) * framework::dataset::make("block", 4); + } // namespace TEST_SUITE(NEON) TEST_SUITE(GEMMLowp) TEST_SUITE(U8) + +TEST_SUITE(INTERLEAVE_BLOCKED) + +using NEInterleaveBlocked = NESynthetizeFunction; +using NEGEMMInterleaveBlockedFixture = GEMMInterleaveBlockedValidationFixture; +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f); +} +TEST_SUITE_END() + +TEST_SUITE(INTERLEAVE_BLOCKED_TRANSPOSED) +using NEInterleaveBlockedTransposed = NESynthetizeFunction; +using NEGEMMInterleaveBlockedTransposedFixture = GEMMInterleaveBlockedValidationFixture; +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMInterleaveBlockedTransposedFixture, framework::DatasetMode::PRECOMMIT, data_int_blk_tr) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f); +} + +TEST_SUITE_END() + using NEGEMMLowpOffsetFixture = GEMMLowpOffsetValidationFixture; FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode::PRECOMMIT, data_mnk *data_offsets) { @@ -61,6 +92,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpOffsetFixture, framework::DatasetMode } TEST_SUITE_END() +#if defined(__aarch64__) +TEST_SUITE(U32) +using NEGEMMLowpFixture = GEMMLowpValidationFixture; +FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMLowpFixture, framework::DatasetMode::PRECOMMIT, framework::dataset::make("M", 12, 20) * framework::dataset::make("N", 12, 20) * framework::dataset::make("K", + 16)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f); +} +TEST_SUITE_END() +#endif // defined(__aarch64__) TEST_SUITE_END() TEST_SUITE_END() } // namespace validation diff --git a/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h b/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h new file mode 100644 index 0000000000..89c188f6a6 --- /dev/null +++ b/tests/validation/fixtures/GEMMInterleaveBlockedFixture.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2017 ARM Limited. 
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE
+#define ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/CPP/GEMMInterleaveBlocked.h"
+#include "tests/validation/Helpers.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, bool Transposed = false>
+class GEMMInterleaveBlockedValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t x, size_t y, int int_by, int block)
+    {
+        const float       interleave_by_f32 = int_by;
+        const TensorShape shape_a(x, y);
+        const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
+        _target    = compute_target(shape_a, shape_b, int_by, block);
+        _reference = compute_reference(shape_a, shape_b, int_by, block);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        ARM_COMPUTE_ERROR_ON(tensor.data_type() != DataType::U8);
+        std::uniform_int_distribution<> distribution(0, 255);
+        library->fill(tensor, distribution, i);
+    }
+
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, int int_by, int block)
+    {
+        // Create tensors
+        TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+
+        // Create and configure function
+        FunctionType f;
+        f.configure(&a, &b, int_by, block, Transposed);
+
+        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(a), 0);
+
+        // Compute the blocked interleave function
+        f.run();
+        return b;
+    }
+
+    SimpleTensor<uint8_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, int int_by, int block)
+    {
+        // Create reference
+        SimpleTensor<uint8_t> a{ shape_a, DataType::U8, 1 };
+        SimpleTensor<uint8_t> b{ shape_b, DataType::U8, 1 };
+
+        // Fill reference
+        fill(a, 0);
+        return reference::gemm_interleave_blocked(a, b, int_by, block, Transposed);
+    }
+
+    TensorType            _target{};
+    SimpleTensor<uint8_t> _reference{};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_GEMM_INTERLEAVE_BLOCKED_FIXTURE */
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index c972469e59..556b6c4725 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -120,6 +120,81 @@ protected:
     SimpleTensor<uint8_t> _reference{};
 };
 
+template <typename TensorType, typename AccessorType, typename FunctionType>
+class GEMMLowpValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(size_t m, size_t n, size_t k)
+    {
+        const TensorShape shape_a(k, m);
+        const TensorShape shape_b(n, k);
+        const TensorShape shape_c(n, m);
+        _target    = compute_target(shape_a, shape_b, shape_c);
+        _reference = compute_reference(shape_a, shape_b, shape_c);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i, int lo, int hi)
+    {
+        std::uniform_int_distribution<> distribution(lo, hi);
+        library->fill(tensor, distribution, i);
+    }
+
+    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+    {
+        // Create tensors
+        TensorType a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
+        TensorType b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
+        TensorType c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+
+        // Create and configure function
+        FunctionType gemmlowp;
+        gemmlowp.configure(&a, &b, &c);
+
+        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+        c.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(a), 0, 0, 3);
+        fill(AccessorType(b), 1, 0, 3);
+        fill(AccessorType(c), 2, 0, 0);
+
+        // Compute GEMM function
+        gemmlowp.run();
+        return c;
+    }
+
+    SimpleTensor<uint32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
+    {
+        // Create reference
+        SimpleTensor<uint8_t>  a{ shape_a, DataType::U8, 1 };
+        SimpleTensor<uint8_t>  b{ shape_b, DataType::U8, 1 };
+        SimpleTensor<uint32_t> c{ shape_c, DataType::U32, 1 };
+
+        // Fill reference
+        fill(a, 0, 0, 3);
+        fill(b, 1, 0, 3);
+        fill(c, 2, 0, 0);
+
+        return reference::gemmlowp(a, b, c);
+    }
+
+    TensorType             _target{};
+    SimpleTensor<uint32_t> _reference{};
+};
+
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-- 
cgit v1.2.1
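
Note for reviewers: the new untemplated reference::gemmlowp(a, b, c) above is a plain u8 x u8 -> u32 matrix multiply with no offsets, multiplier or output shift; it is what the gemmlowp_aarch64_v8p4 kernel is validated against. A minimal standalone sketch of the same computation follows — the free function name and the main() harness are illustrative only, not part of this patch or the library API:

#include <cstdint>
#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative restatement of the reference: C (m x n) = A (m x k) * B (k x n),
// row-major storage, operands widened to 32 bits before multiplying, mirroring
// the int32_t accumulator used in tests/validation/CPP/GEMMLowp.cpp.
void gemmlowp_u8_u32(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                     std::vector<uint32_t> &c, size_t m, size_t n, size_t k)
{
    for(size_t i = 0; i < m; ++i)
    {
        for(size_t j = 0; j < n; ++j)
        {
            int32_t acc = 0;
            for(size_t kk = 0; kk < k; ++kk)
            {
                // Widen each u8 operand so the product cannot wrap.
                acc += static_cast<int32_t>(a[i * k + kk]) * static_cast<int32_t>(b[kk * n + j]);
            }
            c[i * n + j] = static_cast<uint32_t>(acc);
        }
    }
}

int main()
{
    const size_t               m = 2, n = 2, k = 3;
    const std::vector<uint8_t> a{ 1, 2, 3, 4, 5, 6 }; // 2x3
    const std::vector<uint8_t> b{ 1, 0, 0, 1, 1, 1 }; // 3x2
    std::vector<uint32_t>      c(m * n);
    gemmlowp_u8_u32(a, b, c, m, n, k);
    for(uint32_t v : c)
    {
        std::cout << v << ' '; // prints: 4 5 10 11
    }
    std::cout << '\n';
}

The fixture's fill range of [0, 3] keeps the K = 16 accumulations of the precommit test far below any 32-bit overflow, so target and reference can be compared exactly.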