From cfa2bba98169cb5ab1945462514be1b6badf7d98 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 27 Jun 2019 17:00:52 +0100
Subject: COMPMID-2178: Update GEMM assembly code.

Perform offset reduction and requantization within the assembly wrapper.

Change-Id: I5d5b3e1f6f9ef4c71805362c57f88ff199c027a3
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/1541
Comments-Addressed: Pablo Marquez
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 .../core/NEON/kernels/assembly/arm_gemm.hpp     | 116 +++++++--------------
 .../core/NEON/kernels/assembly/gemm_common.hpp  |  10 +-
 2 files changed, 43 insertions(+), 83 deletions(-)

(limited to 'arm_compute/core')

diff --git a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
index 1b511ba79a..828b0f20a7 100644
--- a/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -39,7 +39,9 @@ enum class GemmMethod
     GEMV_NATIVE_TRANSPOSED,
     GEMM_NATIVE,
     GEMM_HYBRID,
-    GEMM_INTERLEAVED
+    GEMM_INTERLEAVED,
+    QUANTIZE_WRAPPER,
+    GEMM_HYBRID_QUANTIZED
 };
 
 struct KernelDescription
@@ -86,98 +88,52 @@ public:
              const unsigned int nmulti, const bool trA, const bool trB,
             const T alpha, const T beta, const int maxthreads,
             const bool pretransposed_hint, const GemmConfig *cfg=nullptr ) :
-             _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
-             _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
-             _pretransposed_hint(pretransposed_hint), _cfg(cfg)
+        _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti),
+        _trA(trA), _trB(trB), _alpha(alpha), _beta(beta), _maxthreads(maxthreads),
+        _pretransposed_hint(pretransposed_hint), _cfg(cfg)
     {
     }
 };
 
+struct ARequantizeLayer32
+{
+public:
+    const int32_t *bias;
+    int32_t        a_offset;
+    int32_t        b_offset;
+    int32_t        c_offset;
+    int32_t        requant_shift;
+    int32_t        requant_mul;
+    int32_t        minval;
+    int32_t        maxval;
+
+    ARequantizeLayer32() = default;
+
+    ARequantizeLayer32(int32_t *b, int32_t ao, int32_t bo, int32_t co, int32_t rs, int32_t rm, int32_t minv, int32_t maxv) :
+        bias(b), a_offset(ao), b_offset(bo), c_offset(co), requant_shift(rs), requant_mul(rm), minval(minv), maxval(maxv)
+    {
+    }
+};
+
+struct Nothing
+{
+};
+
 template<typename Top, typename Tret>
 using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret> >;
 
 /* Low level API calls.
  * These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
 
-/* method_is_compatible(): Can a GEMM of the templated types with the
- * provided parameters be implemented using the supplied method? */
-
-template<typename Top, typename Tret>
-bool method_is_compatible(GemmMethod method, const GemmArgs<Tret> &args);
-
-template<typename Top, typename Tret>
-bool method_is_compatible(GemmMethod method, const CPUInfo &ci,
-                          const unsigned int M, const unsigned int N, const unsigned int K,
-                          const unsigned int nbatches, const unsigned int nmulti,
-                          const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                          const int maxthreads, const bool pretransposed_hint)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
-
-    return method_is_compatible<Top, Tret>(method, args);
-}
-
 /* get_gemm_method(): Given the templated types and provided parameters,
  * which is the preferred method to implement this GEMM? */
-template<typename Top, typename Tret>
-KernelDescription get_gemm_method(const GemmArgs<Tret> &args);
+template<typename Top, typename Tret, class OutputStage = Nothing>
+KernelDescription get_gemm_method(const GemmArgs<Tret> &args, const OutputStage & ={});
 
-template<typename Top, typename Tret>
-KernelDescription get_gemm_method(const CPUInfo &ci,
-                                  const unsigned int M, const unsigned int N, const unsigned int K,
-                                  const unsigned int nbatches, const unsigned int nmulti,
-                                  const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                  const int maxthreads, const bool pretransposed_hint)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint);
-
-    return get_gemm_method<Top, Tret>(args);
-}
-
-template<typename Top, typename Tret>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args);
-
-/** Request an object to process a GEMM.
- *
- * @param[in] ci                 Describes CPU properties.
- * @param[in] M                  Rows in output matrix C (and input matrix A).
- * @param[in] N                  Columns in output matrix C (and input matrix B).
- * @param[in] K                  Columns of input matrix A (= rows of input matrix B).
- * @param[in] nbatches           Number of "batched" GEMMs (unique A and C, shared B).
- * @param[in] nmulti             Number of "multi" GEMMs (unique A, B and C).
- * @param[in] trA                Does the A tensor have rows and columns transposed?
- * @param[in] trB                Does the B tensor have rows and columns transposed?
- * @param[in] alpha              Scalar multiplier to apply to AB matrix product.
- * @param[in] beta               Scalar multiplier to apply to input C matrix before adding product.
- * @param[in] maxthreads         Maximum (and default) number of threads that will call execute method.
- * @param[in] pretransposed_hint Can the B tensor be pretransposed (i.e. shared across invocations)?
- * @param[in] cfg                (optional) configuration parameters
- */
-template<typename Top, typename Tret>
-UniqueGemmCommon<Top, Tret> gemm(const CPUInfo &ci,
-                                 const unsigned int M, const unsigned int N, const unsigned int K,
-                                 const unsigned int nbatches, const unsigned int nmulti,
-                                 const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                 const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
-
-    return gemm<Top, Tret>(args);
-}
-
-template<typename Top, typename Tret>
-std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args);
-
-template<typename Top, typename Tret>
-std::vector<KernelDescription> get_compatible_kernels(const CPUInfo &ci,
-                                                      const unsigned int M, const unsigned int N, const unsigned int K,
-                                                      const unsigned int nbatches, const unsigned int nmulti,
-                                                      const bool trA, const bool trB, const Tret alpha, const Tret beta,
-                                                      const int maxthreads, const bool pretransposed_hint, GemmConfig *cfg=nullptr)
-{
-    GemmArgs<Tret> args(&ci, M, N, K, nbatches, nmulti, trA, trB, alpha, beta, maxthreads, pretransposed_hint, cfg);
+template<typename Top, typename Tret, class OutputStage = Nothing>
+UniqueGemmCommon<Top, Tret> gemm(const GemmArgs<Tret> &args, const OutputStage & ={});
 
-    return get_compatible_kernels<Top, Tret>(args);
-}
+template<typename Top, typename Tret, class OutputStage = Nothing>
+std::vector<KernelDescription> get_compatible_kernels(const GemmArgs<Tret> &args, const OutputStage & ={});
 
 } // namespace arm_gemm
diff --git a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
index bb32fea9da..f59a61703f 100644
--- a/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
+++ b/arm_compute/core/NEON/kernels/assembly/gemm_common.hpp
@@ -46,7 +46,7 @@ public:
      */
     virtual void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                                     const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                           void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
+                                    void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) = 0;
 
     /* For threading, we divide the work into some number of units and work
      * out internally what unit corresponds to what work.  This returns the
@@ -93,6 +93,10 @@ public:
     /* Set pretransposed data - the void * passed in must previously have
      * been passed to pretranspose_B_array() for the same or a similar GEMM. */
     virtual void set_pretransposed_B_data(void *) { }
 
+    /*** "Quantized bias" interface (optional) ***/
+    /* Set the bias vector for quantized GEMMs */
+    virtual void set_quantized_bias(const int32_t *bias) { }
+
     // Destructor
     virtual ~IGemmCommon() { }
 };
@@ -125,7 +129,7 @@ public:
      * strides (templated version with appropriate types). */
     virtual void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const To *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                   Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
+                            Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride) {
         _Aptr = A;
         _lda = lda;
         _A_batch_stride = A_batch_stride;
@@ -142,7 +146,7 @@ public:
     /* Implementation of the void * overload which casts its arguments to the appropriate type. */
     void set_arrays_generic(const void *A, const int lda, const int A_batch_stride, const int A_multi_stride,
                             const void *B, const int ldb, /* batches share B */ const int B_multi_stride,
-                   void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
+                            void *C, const int ldc, const int C_batch_stride, const int C_multi_stride) override {
         set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride,
                    static_cast<const To *>(B), ldb, B_multi_stride,
                    static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride);
-- 
cgit v1.2.1
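
For reference, the sketch below shows one plausible way a caller might drive the quantized entry points added by this patch. It is illustrative only and not part of the change: the include path, the GemmArgs<uint8_t> instantiation, and all numeric offset/multiplier values are assumptions; only the ARequantizeLayer32 fields, the Nothing default, and the gemm<Top, Tret, OutputStage>() signature come from the diff above.

// Sketch: requesting a quantized GEMM via the new OutputStage parameter.
// All concrete values below are placeholders, not taken from the patch.
#include <cstdint>
#include "arm_gemm.hpp" // assumed include path

void run_quantized_gemm_sketch(const CPUInfo &ci, int32_t *bias,
                               unsigned int M, unsigned int N, unsigned int K)
{
    // Shape and threading arguments, unchanged by this patch. Instantiating
    // GemmArgs with uint8_t (the output type) is an assumption here.
    arm_gemm::GemmArgs<uint8_t> args(&ci, M, N, K,
                                     /* nbatches */ 1, /* nmulti */ 1,
                                     /* trA */ false, /* trB */ false,
                                     /* alpha */ 1, /* beta */ 0,
                                     /* maxthreads */ 1,
                                     /* pretransposed_hint */ false);

    // Offsets, requantization multiplier/shift and output clamp range. With
    // this patch the assembly wrapper performs the offset reduction and
    // requantization itself, driven by these values.
    arm_gemm::ARequantizeLayer32 requant(bias,
                                         /* a_offset */ -128, /* b_offset */ -128,
                                         /* c_offset */ 0,
                                         /* requant_shift */ 10,
                                         /* requant_mul */ 1 << 9,
                                         /* minval */ 0, /* maxval */ 255);

    // OutputStage defaults to Nothing (plain GEMM); passing an
    // ARequantizeLayer32 instead selects a quantizing implementation such as
    // QUANTIZE_WRAPPER or GEMM_HYBRID_QUANTIZED.
    auto gemm_obj = arm_gemm::gemm<uint8_t, uint8_t>(args, requant);
    // ... then set_arrays()/set_quantized_bias(), allocate any working
    // space, and call the execute interface as before.
    (void)gemm_obj;
}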