author     Gian Marco Iodice <gianmarco.iodice@arm.com>   2018-10-18 10:21:02 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>      2018-11-02 16:55:45 +0000
commit     4b90865ab985d571f70c60583cdfb8c7a65f1670 (patch)
tree       f116a4ffef5f5e823689dd00c1e5c9d987f3d295 /tests
parent     c55beee7ef70fa08a5d217619083b288a74fcb27 (diff)
download   ComputeLibrary-4b90865ab985d571f70c60583cdfb8c7a65f1670.tar.gz
COMPMID-1413 - Improve the performance of GEMMLowp with 8 bit dot product on OpenCL
COMPMID-1424 - Add dot product support for CLDepthwise QASYMM8 3x3 NHWC non-unit stride

With this patch we are able to improve the performance of MobileNet v1-qasymm8 by 37%.

I also tried to use the dot product instruction in CLDepthwise QASYMM8 3x3 NHWC non-unit stride, but have not seen any benefit (maybe because we have few arithmetic operations and we do not have more load instructions). However, depthwise convolution has been improved by 30%.

Change-Id: Id768a99c2e53a04276707e427af5d0ec93419ada
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/155082
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
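All of the test updates below follow from the extended GEMMLowp matrix-multiply interface: configure() now takes an optional bias tensor as its third argument, and the tests pass nullptr until COMPMID-1672 extends them to exercise the add-bias path of the offset contribution. As a quick reference, here is a minimal sketch of calling the updated CL function outside the test framework; the shapes, quantization parameters, and overall flow are illustrative assumptions, not values taken from this patch.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.h"

using namespace arm_compute;

int main()
{
    // Create the OpenCL context and queue used by all CL functions
    CLScheduler::get().default_init();

    // Illustrative shapes only: A is (K, M), B is (N, K), DST is (N, M)
    const TensorShape shape_a(32U, 16U);
    const TensorShape shape_b(24U, 32U);
    const TensorShape shape_dst(24U, 16U);

    // QASYMM8 inputs with assumed quantization parameters; S32 accumulators
    TensorInfo info_a(shape_a, 1, DataType::QASYMM8);
    info_a.set_quantization_info(QuantizationInfo(1.0f / 255.0f, 10));
    TensorInfo info_b(shape_b, 1, DataType::QASYMM8);
    info_b.set_quantization_info(QuantizationInfo(1.0f / 255.0f, 5));
    const TensorInfo info_dst(shape_dst, 1, DataType::S32);

    CLTensor a, b, dst;
    a.allocator()->init(info_a);
    b.allocator()->init(info_b);
    dst.allocator()->init(info_dst);

    // Updated signature: configure(a, b, bias, output[, gemm_info]).
    // The bias is optional; pass nullptr just as the updated tests do.
    CLGEMMLowpMatrixMultiplyCore gemmlowp;
    gemmlowp.configure(&a, &b, nullptr, &dst);

    // Allocate backing memory and run (inputs would normally be mapped
    // and filled with quantized data before run())
    a.allocator()->allocate();
    b.allocator()->allocate();
    dst.allocator()->allocate();
    gemmlowp.run();
    CLScheduler::get().sync();

    return 0;
}

The NEON path changes in the same way: NEGEMMLowpMatrixMultiplyCore::configure() and ::validate() gain the same bias parameter, which is what the tests/validation/NEON/GEMMLowp.cpp hunks below adjust.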
Diffstat (limited to 'tests')
-rw-r--r--  tests/benchmark/fixtures/GEMMLowpFixture.h  | 2
-rw-r--r--  tests/validate_examples/cl_gemm.cpp         | 2
-rw-r--r--  tests/validation/CL/GEMMLowp.cpp            | 7
-rw-r--r--  tests/validation/NEON/GEMMLowp.cpp          | 3
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h | 3
5 files changed, 10 insertions, 7 deletions
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
index 46a2f5cc6a..33c6415d20 100644
--- a/tests/benchmark/fixtures/GEMMLowpFixture.h
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -58,7 +58,7 @@ public:
c = create_tensor<TensorType>(shape_dst, DataType::S32, 1, QuantizationInfo(1.0f / 255.0f, 0));
// Create and configure function
- gemmlowp.configure(&a, &b, &c);
+ gemmlowp.configure(&a, &b, nullptr, &c);
// Allocate tensors
a.allocator()->allocate();
diff --git a/tests/validate_examples/cl_gemm.cpp b/tests/validate_examples/cl_gemm.cpp
index cdaa33f31a..8b3a103db7 100644
--- a/tests/validate_examples/cl_gemm.cpp
+++ b/tests/validate_examples/cl_gemm.cpp
@@ -193,7 +193,7 @@ public:
init_sgemm_output(tmp_dst, src0, src1, DataType::S32);
// Configure GEMMlowp matrix multiply function
- mm_gemmlowp.configure(&src0, &src1, &tmp_dst);
+ mm_gemmlowp.configure(&src0, &src1, nullptr, &tmp_dst);
// Configure GEMMlowp output stage
mm_gemmlowp_output_stage.configure(&tmp_dst, add_bias ? &biases : nullptr, &dst, dst_multiplier, dst_shift, offset_dst);
diff --git a/tests/validation/CL/GEMMLowp.cpp b/tests/validation/CL/GEMMLowp.cpp
index 42bb2123bf..f0f768dd1b 100644
--- a/tests/validation/CL/GEMMLowp.cpp
+++ b/tests/validation/CL/GEMMLowp.cpp
@@ -67,7 +67,8 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c
// Create and configure function
CLGEMMLowpMatrixMultiplyCore gemmlowp_mm;
- gemmlowp_mm.configure(&a, &b, &c);
+ // TODO (giaiod01) COMPMID-1672 - Extending the test to validate add bias in offset contribution
+ gemmlowp_mm.configure(&a, &b, nullptr, &c);
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMLowpMatrixMultiplyCoreFixture, framework::DatasetMode::ALL, datasets::SmallGEMMLowpDataset())
@@ -155,7 +156,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
}
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
validate(in.info()->padding(), padding);
validate(out.info()->padding(), padding);
@@ -238,7 +239,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::da
}
// Validate padding
- const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+ const PaddingSize padding = PaddingCalculator(shape.x(), 4).required_padding();
validate(in.info()->padding(), padding);
validate(out.info()->padding(), padding);
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 9eba3c85c1..1458c9fdc3 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -95,7 +95,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::c
// Create and configure function
NEGEMMLowpMatrixMultiplyCore gemmlowp_mm;
- gemmlowp_mm.configure(&a, &b, &c);
+ gemmlowp_mm.configure(&a, &b, nullptr, &c);
}
// *INDENT-OFF*
@@ -125,6 +125,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
// Lock tensors
Status status = NEGEMMLowpMatrixMultiplyCore::validate(&a_info.clone()->set_is_resizable(false),
&b_info.clone()->set_is_resizable(false),
+ nullptr,
&output_info.clone()->set_is_resizable(false));
ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
}
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 73cb8328ea..b61b4eca38 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -75,7 +75,8 @@ protected:
// Create and configure function
// The GEMMinfo includes the values of the depth in case of reinterpreted 3d input/output
FunctionType gemmlowp;
- gemmlowp.configure(&a, &b, &c, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_c[2] : 1), reinterpret_input_as_3d));
+ // TODO (COMPMID-1672) - Extending the test to validate add bias in offset contribution
+ gemmlowp.configure(&a, &b, nullptr, &c, GEMMInfo(false, false, false, (reinterpret_output_as_3d ? shape_c[2] : 1), reinterpret_input_as_3d));
ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);