author     Georgios Pinitas <georgios.pinitas@arm.com>    2019-03-01 19:07:11 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2019-03-19 11:53:45 +0000
commit     146138378c1587b7297d245b7177641315f6180b
tree       4689218b48a0884418099f373015429f2845ceaf /arm_compute/core/NEON/kernels/assembly
parent     3dd5b6884a65c06bcb9d15589ee2dc2978e3b336
COMPMID-1995: Update RSH GEMM assembly kernels.
- Updates u8/s8 hybrid dot product kernels to work for any N and any K >= 16.
- Adds hybrid FP32 kernels with generic and A55 variants.
- Adds SVE native kernels for fp16/u8/s8.
Change-Id: Ifc0eaba9e3c8ea5bb19d334e870e1b39e4e7e728
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/863
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/kernels/assembly')
-rw-r--r--   arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp     | 22
-rw-r--r--   arm_compute/core/NEON/kernels/assembly/gemm_common.hpp  | 28
2 files changed, 25 insertions, 25 deletions
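The arm_gemm.hpp hunk below changes get_compatible_kernels() to return KernelDescription records (carrying the new is_default flag) instead of plain kernel-name strings, and threads an optional GemmConfig pointer through to GemmArgs. As a rough caller-side sketch of the updated query API, assuming an fp32 instantiation is available as the commit message suggests (the CPUInfo source, problem sizes and include setup are illustrative, not taken from this patch):

#include "arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp"

#include <cstdio>

// Hypothetical helper: list every kernel that could service a small fp32 GEMM
// and mark the one the heuristics would pick by default.
void print_candidate_kernels(const CPUInfo &ci)
{
    auto kernels = arm_gemm::get_compatible_kernels<float, float>(
        ci, /*M=*/128, /*N=*/128, /*K=*/128,
        /*nbatches=*/1, /*nmulti=*/1,
        /*trA=*/false, /*trB=*/false,
        /*alpha=*/1.0f, /*beta=*/0.0f,
        /*maxthreads=*/1, /*pretransposed_hint=*/false);

    for(const arm_gemm::KernelDescription &k : kernels)
    {
        std::printf("%s%s\n", k.name.c_str(), k.is_default ? " (default)" : "");
    }
}

Since the new cfg parameter defaults to nullptr, only call sites that stored the old std::vector<std::string> result need updating.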
diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 26c1f3df89..1b511ba79a 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -42,13 +42,13 @@ enum class GemmMethod
     GEMM_INTERLEAVED
 };
 
-
 struct KernelDescription
 {
-    GemmMethod  method = GemmMethod::DEFAULT;
-    std::string name   = "";
+    GemmMethod  method     = GemmMethod::DEFAULT;
+    std::string name       = "";
+    bool        is_default = false;
 
-    KernelDescription(GemmMethod m, std::string n) : method(m), name(n) { }
+    KernelDescription(GemmMethod m, std::string n, bool d=false) : method(m), name(n), is_default(d) { }
     KernelDescription() { }
 };
@@ -166,16 +166,16 @@ UniqueGemmCommon<Top, Tret> gemm(const CPUInfo &ci,
 }
 
 template<typename Top, typename Tret>
-std::vector<std::string> get_compatible_kernels(const GemmArgs<Tret> &args);
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args);
 
 template<typename Top, typename Tret>
-std::vector<std::string> get_compatible_kernels(const CPUInfo &ci,
-                                                const unsigned int M, const unsigned int N, const unsigned int K,
-                                                const unsigned int nbatches, const unsigned int nmulti,
-                                                const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                                const int maxthreads, const bool pretransposed_hint)
+std::vector<KernelDescription> get_compatible_kernels(const CPUInfo &ci,
+                                                      const unsigned int M, const unsigned int N, const unsigned int K,
+                                                      const unsigned int nbatches, const unsigned int nmulti,
+                                                      const bool trA, const bool trB, const Tret alpha, const Tret beta,
+                                                      const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
 {
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
+    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
 
     return get_compatible_kernels<Top, Tret>(args);
 }
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index c72f210e56..bb32fea9da 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -39,14 +39,14 @@ namespace arm_gemm {
 class IGemmCommon {
 public:
     /* Pass in the pointers to the arrays to be operated on and their
-     * strides. In the interface class these are passed as void pointers -
-     * the templated version overloads this function with a version which
-     * takes appropriately typed pointers. If B is pretransposed (see
-     * below) then the settings for B here are ignored.
+     * strides. This "generic" version uses void *s, the preferred version
+     * is the one provided by templated GemmCommon (below) which takes
+     * appropriately typed pointers. If B is pretransposed (see below) then
+     * the settings for B here are ignored.
      */
-    virtual void set_arrays(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
-                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+    virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
 
     /* For threading, we divide the work into some number of units and work
      * out internally what unit corresponds to what work. This returns the
@@ -89,7 +89,7 @@ public:
     virtual size_t get_B_pretransposed_array_size() const { return 0; }
     /* Perform pretranspose - arguments are output, input, input row stride and input multi stride. */
     /* The "real" version of this depends on the templated operand type (see below). */
-    virtual void pretranspose_B_array(void *, const void *, const int, const int) = 0;
+    virtual void pretranspose_B_array_generic(void *, const void *, const int, const int) = 0;
     /* Set pretransposed data - the void * passed in must previously have been passed to pretranspose_B_array() for the same or a similar GEMM. */
     virtual void set_pretransposed_B_data(void *) { }
 
@@ -125,7 +125,7 @@ public:
      * strides (templated version with appropriate types). */
     virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
         _Aptr = A;
         _lda = lda;
         _A_batch_stride = A_batch_stride;
@@ -140,9 +140,9 @@ public:
     }
 
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
-    void set_arrays(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
-                    const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+    void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
+                            const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
+                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
         set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                    static_cast<const To *>(B), ldb, B_multi_stride,
                    static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
@@ -155,10 +155,10 @@ public:
     virtual void pretranspose_B_array(void *, const To *, const int, const int) { };
 
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
-    void pretranspose_B_array(void *out, const void *in, const int row_stride, const int multi_stride) override {
+    void pretranspose_B_array_generic(void *out, const void *in, const int row_stride, const int multi_stride) override {
         pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride);
     }
 };
 
-} // namespace arm_gemm
\ No newline at end of file
+} // namespace arm_gemm
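The gemm_common.hpp change above separates the untyped entry points from the typed ones: IGemmCommon now exposes set_arrays_generic() and pretranspose_B_array_generic() taking void pointers, while GemmCommon<To, Tr> implements them by casting and forwarding to the typed set_arrays()/pretranspose_B_array(). A minimal sketch of glue code driving the renamed interface (the stride values, scratch allocation and lifetime handling are illustrative only, not taken from the library):

#include "arm_compute/core/NEON/kernels/assembly/gemm_common.hpp"

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical glue code that only sees the untyped IGemmCommon interface.
void bind_buffers(arm_gemm::IGemmCommon &gemm,
                  const void *A, const void *B, void *C,
                  int lda, int ldb, int ldc)
{
    // Single batch / single multi: the corresponding strides are unused here.
    gemm.set_arrays_generic(A, lda, /*A_batch_stride=*/0, /*A_multi_stride=*/0,
                            B, ldb, /*B_multi_stride=*/0,
                            C, ldc, /*C_batch_stride=*/0, /*C_multi_stride=*/0);

    // If the selected kernel wants B pretransposed, ask for the scratch size and
    // feed B back through the generic overload.  (A real caller must keep this
    // buffer alive for as long as the GEMM uses it; a local vector is shown only
    // to keep the sketch short.)
    if(const std::size_t scratch_size = gemm.get_B_pretransposed_array_size())
    {
        std::vector<std::uint8_t> scratch(scratch_size);
        gemm.pretranspose_B_array_generic(scratch.data(), B, ldb, /*B_multi_stride=*/0);
    }
}

Typed callers are unaffected by the rename: code holding a GemmCommon<To, Tr> continues to call set_arrays() and pretranspose_B_array() with typed pointers, exactly as before this patch.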