author     Gian Marco <gianmarco.iodice@arm.com>  2017-11-30 14:31:13 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:42:17 +0000
commit     c7f9b893b8edc5660542821e2d0508460bc40225 (patch)
tree       594456a7da9335bebda56498cfbb39be3a9609a2 /tests/benchmark/fixtures/GEMMLowpFixture.h
parent     23ac91b6ba235e67847802d4b49e494fa5bedbb6 (diff)
download   ComputeLibrary-c7f9b893b8edc5660542821e2d0508460bc40225.tar.gz
COMPMID-722 - Support for vector-matrix in GEMMLowp (NEON)

This patch includes COMPMID-716 as well:
- Added vector-matrix case in NEGEMMLowpMatrixMultiplyKernel
- Added benchmarks for NEON and OpenCL

Change-Id: I715cd25e8668a4d6c8127e9a298a865e7713267f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/111468
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
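For context on the quantization note in the patch below: GEMMLowp operates on quantized 8-bit values, and the library documents the core computation as adding each matrix's offset to its int32-converted values before the multiplication. With both offsets at 0, as in this fixture, the offset-contribution stage contributes nothing and the benchmark measures the plain u8 x u8 -> s32 matrix product. A minimal reference sketch of that computation follows; the helper name and row-major layout are illustrative, not part of this patch:

#include <cstddef>
#include <cstdint>
#include <vector>

// Reference low-precision GEMM: dst(i, j) = sum_k (a(i, k) + a_offset) * (b(k, j) + b_offset).
// a is m x k, b is k x n, both row-major. With a_offset == b_offset == 0 this reduces to a
// plain uint8 x uint8 -> int32 product, which is what the fixture below benchmarks.
std::vector<int32_t> gemmlowp_reference(const std::vector<uint8_t> &a, const std::vector<uint8_t> &b,
                                        size_t m, size_t n, size_t k,
                                        int32_t a_offset = 0, int32_t b_offset = 0)
{
    std::vector<int32_t> dst(m * n, 0);
    for(size_t i = 0; i < m; ++i)
    {
        for(size_t j = 0; j < n; ++j)
        {
            int32_t acc = 0;
            for(size_t x = 0; x < k; ++x)
            {
                acc += (static_cast<int32_t>(a[i * k + x]) + a_offset) * (static_cast<int32_t>(b[x * n + j]) + b_offset);
            }
            dst[i * n + j] = acc;
        }
    }
    return dst;
}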
Diffstat (limited to 'tests/benchmark/fixtures/GEMMLowpFixture.h')
-rw-r--r--  tests/benchmark/fixtures/GEMMLowpFixture.h  66
1 file changed, 16 insertions, 50 deletions
diff --git a/tests/benchmark/fixtures/GEMMLowpFixture.h b/tests/benchmark/fixtures/GEMMLowpFixture.h
index b640705990..4bd7dfd42f 100644
--- a/tests/benchmark/fixtures/GEMMLowpFixture.h
+++ b/tests/benchmark/fixtures/GEMMLowpFixture.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_TEST_GEMMFIXTURE
-#define ARM_COMPUTE_TEST_GEMMFIXTURE
+#ifndef ARM_COMPUTE_TEST_GEMMLOWPFIXTURE
+#define ARM_COMPUTE_TEST_GEMMLOWPFIXTURE
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
@@ -34,59 +34,26 @@ namespace arm_compute
{
namespace test
{
-template <typename TensorType, typename Function, typename Accessor, bool Transposed = false>
-class GEMMInterleaveBlockedFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(size_t x, size_t y, int int_by, int block)
- {
- const float interleave_by_f32 = int_by;
- const TensorShape shape_a(x, y);
- const TensorShape shape_b(static_cast<size_t>(x * interleave_by_f32), static_cast<size_t>(std::ceil(y / interleave_by_f32)));
- // Create tensors
- a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
- b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
-
- // Create and configure function
- f.configure(&a, &b, int_by, block, Transposed);
-
- // Allocate tensors
- a.allocator()->allocate();
- b.allocator()->allocate();
- }
- void run()
- {
- f.run();
- }
-
- void teardown()
- {
- a.allocator()->free();
- b.allocator()->free();
- }
-
-private:
- TensorType a{};
- TensorType b{};
- Function f{};
-};
-
/** Fixture that can be used for NEON and CL */
template <typename TensorType, typename Function, typename Accessor>
-class GEMMLowpFixture : public framework::Fixture
+class GEMMLowpMatrixMultiplyCoreFixture : public framework::Fixture
{
public:
template <typename...>
- void setup(size_t m, size_t n, size_t k)
+ void setup(TensorShape shape_a, TensorShape shape_b, TensorShape shape_c, TensorShape shape_dst, float alpha, float beta)
{
- const TensorShape shape_a(k, m);
- const TensorShape shape_b(n, k);
- const TensorShape shape_c(n, m);
+ // TODO (COMPMID-717): The interface used for GEMMLowp is the same one used for GEMM in order to re-use the datasets
+ // However, the interface for both GEMM and GEMMLowp should be reworked in order to accept only the 3 dimensions M, N and K
+ ARM_COMPUTE_UNUSED(shape_c);
+ ARM_COMPUTE_UNUSED(alpha);
+ ARM_COMPUTE_UNUSED(beta);
+
+ // Note: The offsets for matrix A and matrix B are set to 0 in order to skip the computation for the offset contribution
+
// Create tensors
- a = create_tensor<TensorType>(shape_a, DataType::U8, 1);
- b = create_tensor<TensorType>(shape_b, DataType::U8, 1);
- c = create_tensor<TensorType>(shape_c, DataType::U32, 1);
+ a = create_tensor<TensorType>(shape_a, DataType::QASYMM8, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
+ b = create_tensor<TensorType>(shape_b, DataType::QASYMM8, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
+ c = create_tensor<TensorType>(shape_dst, DataType::S32, 1, 0, QuantizationInfo(1.0f / 255.0f, 0));
// Create and configure function
gemmlowp.configure(&a, &b, &c);
@@ -99,7 +66,6 @@ public:
// Fill tensors
library->fill_tensor_uniform(Accessor(a), 0);
library->fill_tensor_uniform(Accessor(b), 1);
- library->fill_tensor_uniform(Accessor(c), 2);
}
void run()
{
@@ -122,4 +88,4 @@ private:
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMFIXTURE */
+#endif /* ARM_COMPUTE_TEST_GEMMLOWPFIXTURE */
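For reference, a NEON benchmark would drive the renamed fixture along these lines. This is a sketch, not code from the patch: the alias, header paths and shape values are assumptions, and it relies on the test framework environment (the global library object used by the fixture's setup). In the tree the fixture is typically wired up through the benchmark framework's registration macros such as REGISTER_FIXTURE_DATA_TEST_CASE; the direct calls here just make the setup/run/teardown sequence explicit.

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"
#include "tests/NEON/Accessor.h"
#include "tests/benchmark/fixtures/GEMMLowpFixture.h"

using namespace arm_compute;
using namespace arm_compute::test;

// Hypothetical alias pairing the fixture with the NEON function and accessor.
using NEGEMMLowpMatrixMultiplyCoreFixture = GEMMLowpMatrixMultiplyCoreFixture<Tensor, NEGEMMLowpMatrixMultiplyCore, Accessor>;

void benchmark_vector_matrix()
{
    NEGEMMLowpMatrixMultiplyCoreFixture fixture;
    // Vector-matrix case from the commit title: shape_a is a single row (M = 1),
    // here with illustrative K = 64 and N = 16. TensorShape packs (width, height),
    // so shape_a is (K, M), shape_b is (N, K) and shape_dst is (N, M).
    // shape_c, alpha and beta are ignored by the fixture, as noted in the TODO above.
    fixture.setup(TensorShape(64U, 1U), TensorShape(16U, 64U), TensorShape(16U, 1U), TensorShape(16U, 1U), 1.0f, 0.0f);
    fixture.run();
    fixture.teardown();
}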