about summary refs log tree commit diff
path: root/tests/benchmark/fixtures/GEMMFixture.h
diff options
context:
space:
mode:
authorGian Marco <gianmarco.iodice@arm.com>2018-02-05 15:07:36 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:47:18 +0000
commit1aede367b9ecab66dfaf6fb07f95720064afdea4 (patch)
tree4ff21a0bd82bef963cd7b7a8013c0dceedb78fd5 /tests/benchmark/fixtures/GEMMFixture.h
parent1167487ea8e54a76d0a3625e0aa84e2ad9ffd317 (diff)
downloadComputeLibrary-1aede367b9ecab66dfaf6fb07f95720064afdea4.tar.gz
COMPMID-765 - Extended GEMM benchmark
Added new GEMM benchmarks in order to evaluate the performance when the input matrix B has to be reshaped only once.

Change-Id: I1c4790213704ce57ea7b28f6f362c56edccd1eb9
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/118910
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/benchmark/fixtures/GEMMFixture.h')
-rw-r--r-- tests/benchmark/fixtures/GEMMFixture.h | 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/benchmark/fixtures/GEMMFixture.h b/tests/benchmark/fixtures/GEMMFixture.h
index e958d4fdde..3a308d9ada 100644
--- a/tests/benchmark/fixtures/GEMMFixture.h
+++ b/tests/benchmark/fixtures/GEMMFixture.h
@@ -40,7 +40,7 @@ class GEMMFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape shape_dst, float alpha, float beta, DataType data_type)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape shape_dst, float alpha, float beta, DataType data_type, bool reshape_b_only_on_first_run)
{
constexpr int fixed_point_position = 4;
@@ -51,7 +51,7 @@ public:
dst = create_tensor<TensorType>(shape_dst, data_type, 1, fixed_point_position);
// Create and configure function
- gemm.configure(&a, &b, &c, &dst, alpha, beta);
+ gemm.configure(&a, &b, &c, &dst, alpha, beta, GEMMInfo(false, false, reshape_b_only_on_first_run));
// Allocate tensors
a.allocator()->allocate();