From cfa2bba98169cb5ab1945462514be1b6badf7d98 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Thu, 27 Jun 2019 17:00:52 +0100
Subject: COMPMID-2178: Update GEMM assembly code.

Perform offset reduction and requantization within the assembly wrapper.

Change-Id: I5d5b3e1f6f9ef4c71805362c57f88ff199c027a3
Signed-off-by: Georgios Pinitas
Reviewed-on: https://review.mlplatform.org/c/1541
Comments-Addressed: Pablo Marquez
Reviewed-by: Gian Marco Iodice
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
---
 arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h | 13 ++++++++-----
 .../NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h  | 10 +++++-----
 .../runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h  |  3 ++-
 3 files changed, 15 insertions(+), 11 deletions(-)

(limited to 'arm_compute/runtime')

diff --git a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
index b5a2978ea1..ec4f700034 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h
@@ -67,6 +67,7 @@ private:
      * @param[in]  method    GemmMethod to use to perform the matrix multiplication.
      * @param[in]  a         Input tensor (Matrix A).
      * @param[in]  b         Input tensor (Matrix B).
+     * @param[in]  c         Input tensor (Matrix C) used to pass the bias for quantized calculations
      * @param[out] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
      * @param[in]  alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in]  beta      Scalar multiplier to apply to input D matrix before adding product.
@@ -74,7 +75,7 @@ private:
      *
      * @return True if the method is supported and the function was successfully created, false otherwise.
      */
-    bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    bool create_function(arm_gemm::GemmMethod method, const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
 
     /** Interface for the arm_gemm fallback */
     std::unique_ptr<IFallback> _arm_gemm;
@@ -85,17 +86,19 @@ public:
      *
      * @param[in]  a         Input tensor (Matrix A)
      * @param[in]  b         Input tensor (Matrix B)
+     * @param[in]  c         Input tensor (Matrix C) used to pass the bias for quantized calculations
      * @param[out] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
      * @param[in]  alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in]  beta      Scalar multiplier to apply to input D matrix before adding product.
      * @param[in]  gemm_info GEMM meta-data
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &gemm_info);
 
     /** Indicates whether or not this function can be used to process the given parameters.
      *
-     * @param[in] a         Input tensor (Matrix A)
-     * @param[in] b         Input tensor (Matrix B)
+     * @param[in] a         Input tensor info (Matrix A)
+     * @param[in] b         Input tensor info (Matrix B)
+     * @param[in] c         Input tensor info (Matrix C) used to pass the bias for quantized calculations
      * @param[in] d         Output tensor to store the result of matrix multiplication. Data type supported: same as @p a.
      * @param[in] alpha     Scalar multiplier to apply to AB matrix product.
      * @param[in] beta      Scalar multiplier to apply to input D matrix before adding product.
@@ -103,7 +106,7 @@ public:
      *
      * @return a status.
      */
-    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
+    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *d, float alpha, float beta, const GEMMInfo &gemm_info);
     /** Was the function successfully configured ?
      *
      * @return True if the function is configured and ready to run
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
index 38682843b7..dd23f1ce22 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h
@@ -35,11 +35,10 @@
 
 namespace arm_compute
 {
+// Forward declarations
 class ITensor;
 
-/** Basic function to execute matrix multiply assembly kernels.
- *
-*/
+/** Basic function to execute matrix multiply assembly kernels. */
 class NEGEMMLowpAssemblyMatrixMultiplyCore : public IFunction
 {
 public:
@@ -49,9 +48,10 @@ public:
      *
      * @param[in]  a      First input tensor  (Matrix A). Data type supported: U8, S8.
      * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a
+     * @param[in]  c      Third input tensor  (Matrix C). Data type supported: same as @p a
      * @param[out] output Output tensor. Data type supported: U32, S32
      */
-    void configure(const ITensor *a, const ITensor *b, ITensor *output);
+    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output);
 
     // Inherited methods overridden:
     void run() override;
@@ -65,5 +65,5 @@ private:
     Tensor _tmp_a;
     Tensor _tmp_b;
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_NEGEMMLOWPASSEMBLYMATRIXMULTIPLYCORE_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
index d3b27e4faf..5b6a0dd943 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h
@@ -122,7 +122,8 @@ private:
     int32_t _a_offset;
     int32_t _b_offset;
     bool    _run_vector_matrix_multiplication;
-    bool    _dot_product_path;
+    bool    _assembly_path;
+    bool    _fused_assembly_path;
     bool    _reshape_b_only_on_first_run;
     bool    _is_prepared;
     bool    _fuse_output_stage;
--
cgit v1.2.1
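
Note: the sketch below is not part of the patch. It is a minimal, illustrative caller of the updated NEGEMMLowpAssemblyMatrixMultiplyCore::configure() overload, showing where the new bias input `c` fits in. The shapes, the S8/S32 data-type choices, and the use of a full M x N matrix for `c` are assumptions made for the example only; error handling and data filling are omitted.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative problem size: (M x K) * (K x N) = (M x N).
    // ACL's TensorShape is (width, height), i.e. (columns, rows).
    constexpr unsigned int M = 16, N = 8, K = 32;

    Tensor a, b, c, output;
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::S8));      // Matrix A
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::S8));      // Matrix B
    c.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S8));      // Matrix C: the new bias input
    output.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::S32));

    NEGEMMLowpAssemblyMatrixMultiplyCore gemm;
    gemm.configure(&a, &b, &c, &output); // `c` is the extra argument introduced by this patch

    a.allocator()->allocate();
    b.allocator()->allocate();
    c.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill a, b and c with quantized data, then run the function:
    gemm.run();
    return 0;
}

With the fused path added by this change, passing the bias through `c` lets the assembly wrapper perform the offset reduction and requantization itself, instead of dispatching separate reduction/offset-contribution kernels afterwards.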