From 146138378c1587b7297d245b7177641315f6180b Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 1 Mar 2019 19:07:11 +0000
Subject: COMPMID-1995: Update RSH GEMM assembly kernels.

-Updates u8/s8 hybrid dot product kernels to work for any N and any K >=16.
-Adds hybrid FP32 kernels with generic and A55 variants.
-Adds SVE native kernels for fp16/u8/s8.

Change-Id: Ifc0eaba9e3c8ea5bb19d334e870e1b39e4e7e728
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/863
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
Reviewed-by: Michalis Spyrou
---
 .../core/NEON/kernels/assembly/arm_gemm.hpp    | 22 +++++++++++-----------
 .../core/NEON/kernels/assembly/gemm_common.hpp | 28 ++++++++++++++--------------
 2 files changed, 25 insertions(+), 25 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 26c1f3df89..1b511ba79a 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -42,13 +42,13 @@ enum class GemmMethod
     GEMM_INTERLEAVED
 };
 
-
 struct KernelDescription
 {
-    GemmMethod   method = GemmMethod::DEFAULT;
-    std::string  name   = "";
+    GemmMethod   method     = GemmMethod::DEFAULT;
+    std::string  name       = "";
+    bool         is_default = false;
 
-    KernelDescription(GemmMethod m, std::string n) : method(m), name(n) { }
+    KernelDescription(GemmMethod m, std::string n, bool d=false) : method(m), name(n), is_default(d) { }
     KernelDescription() { }
 };
 
@@ -166,16 +166,16 @@ UniqueGemmCommon<Top, Tret> gemm(const CPUInfo &ci,
 }
 
 template<typename Top, typename Tret>
-std::vector<std::string> get_compatible_kernels(const GemmArgs<Tret> &args);
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args);
 
 template<typename Top, typename Tret>
-std::vector<std::string> get_compatible_kernels(const CPUInfo &ci,
-                                                const unsigned int M, const unsigned int N, const unsigned int K,
-                                                const unsigned int nbatches, const unsigned int nmulti,
-                                                const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                                const int maxthreads, const bool pretransposed_hint)
+std::vector<KernelDescription> get_compatible_kernels(const CPUInfo &ci,
+                                                      const unsigned int M, const unsigned int N, const unsigned int K,
+                                                      const unsigned int nbatches, const unsigned int nmulti,
+                                                      const bool trA, const bool trB, const Tret alpha, const Tret beta,
+                                                      const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
 {
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
+    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
 
     return get_compatible_kernels<Top, Tret>(args);
 }
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index c72f210e56..bb32fea9da 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -39,14 +39,14 @@ namespace arm_gemm {
 class IGemmCommon {
 public:
     /* Pass in the pointers to the arrays to be operated on and their
-     * strides. In the interface class these are passed as void pointers -
-     * the templated version overloads this function with a version which
-     * takes appropriately typed pointers. If B is pretransposed (see
-     * below) then the settings for B here are ignored.
+     * strides. This "generic" version uses void *s, the preferred version
+     * is the one provided by templated GemmCommon (below) which takes
+     * appropriately typed pointers. If B is pretransposed (see below) then
+     * the settings for B here are ignored.
      */
-    virtual void set_arrays(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
-                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+    virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
 
     /* For threading, we divide the work into some number of units and work
      * out internally what unit corresponds to what work. This returns the
@@ -89,7 +89,7 @@ public:
     virtual size_t get_B_pretransposed_array_size() const { return 0; }
     /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
    /* The "real" version of this depends on the templated operand type (see below). */
-    virtual void pretranspose_B_array(void *, const void *, const int, const int) = 0;
+    virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
     /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
     virtual void set_pretransposed_B_data(void *) { }
 
@@ -125,7 +125,7 @@ public:
      * strides (templated version with appropriate types). */
     virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
         _Aptr = A;
         _lda = lda;
         _A_batch_stride = A_batch_stride;
@@ -140,9 +140,9 @@ public:
     }
 
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
-    void set_arrays(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
-                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+    void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
         set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                    static_cast<const To *>(B), ldb, B_multi_stride,
                    static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
@@ -155,10 +155,10 @@ public:
     virtual void pretranspose_B_array(void *, const To *, const int, const int) { };
 
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
-    void pretranspose_B_array(void *out, const void *in, const int row_stride, const int multi_stride) override {
+    void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
         pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
     }
 };
 
-} // namespace arm_gemm
\ No newline at end of file
+} // namespace arm_gemm
-- 
cgit v1.2.1
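
A note on the pattern this diff converges on: IGemmCommon keeps type-erased void * entry points, now under distinct "_generic" names (set_arrays_generic, pretranspose_B_array_generic), while the templated GemmCommon owns the preferred, typed versions and implements each generic method by casting and forwarding. The sketch below illustrates that split in isolation; it is not the library's code. Stride and batch parameters are dropped for brevity and the class bodies are reduced to the dispatch itself, so only the method names and the cast-and-forward shape mirror the patch.

#include <iostream>

// Type-erased interface: independent of the operand and result types.
class IGemmCommon {
public:
    // Generic entry point for callers that only hold void pointers.
    virtual void set_arrays_generic(const void *A, const void *B, void *C) = 0;
    virtual ~IGemmCommon() = default;
};

// Templated implementation: the preferred, typed entry point lives here.
template <typename To, typename Tr>
class GemmCommon : public IGemmCommon {
public:
    // Typed version: what kernel code actually calls or overrides.
    virtual void set_arrays(const To *A, const To *B, Tr *C) {
        _Aptr = A;
        _Bptr = B;
        _Cptr = C;
    }

    // Generic version: casts its void * arguments to the template's types
    // and forwards to the typed member above.
    void set_arrays_generic(const void *A, const void *B, void *C) override {
        set_arrays(static_cast<const To *>(A),
                   static_cast<const To *>(B),
                   static_cast<Tr *>(C));
    }

protected:
    const To *_Aptr = nullptr;
    const To *_Bptr = nullptr;
    Tr       *_Cptr = nullptr;
};

int main() {
    float a[4] = {}, b[4] = {}, c[4] = {};
    GemmCommon<float, float> gemm;

    // Callers holding only the interface use the generic entry point...
    IGemmCommon &iface = gemm;
    iface.set_arrays_generic(a, b, c);

    // ...while typed callers hit the template member directly.
    gemm.set_arrays(a, b, c);
    std::cout << "arrays set\n";
    return 0;
}

One plausible reason for the distinct names, consistent with the updated comment in the diff: the typed and void * functions no longer form a single overload set, so a typed call can never silently bind to the void * variant, and kernel classes can override the typed set_arrays without touching the generic plumbing.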