From 1d25ed54a948639d1894c8b021940df70005d519 Mon Sep 17 00:00:00 2001
From: Gian Marco
Date: Sat, 16 Dec 2017 19:33:50 +0000
Subject: COMPMID-759 - CLGEMM optimization for McVail benchmarks

This patch introduces an optimization for CLGEMM on Bifrost
architectures which can bring FMA utilization to 40% on config 3 of
McVail. The new CLGEMM does not require any reshape of matrix A or
matrix B.

This patch also adds auto-configuration in CLConvolutionLayer and
CLGEMM and extends the interfaces of NEGEMM and CLGEMM.

Change-Id: Ibb354eda45e9ca64b14a99700fb21dff5989dda9
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/113716
Tested-by: Jenkins
Reviewed-by: Michalis Spyrou
Reviewed-by: Anthony Barbier
---
 arm_compute/runtime/CL/functions/CLConvolutionLayer.h |  3 +--
 arm_compute/runtime/CL/functions/CLGEMM.h             | 18 +++++++++++-------
 2 files changed, 12 insertions(+), 9 deletions(-)

(limited to 'arm_compute/runtime/CL/functions')

diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index a8a04a0bbf..3fe6604db9 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -138,10 +138,9 @@ private:
     CLTensor _gemm_output;
     CLTensor _tmp_output;
 
-    bool _append_bias;
-    bool _is_fully_connected_convolution;
     bool _are_weights_reshaped;
     bool _is_quantized;
+    bool _is_interleaved_transposed;
 };
 }
 #endif /* __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 2765b77b7d..bf41226bda 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -61,14 +61,16 @@ public:
      *
      * @note Whilst the first input tensor can be a vector, the second input tensor must be at least a matrix
      *
-     * @param[in]  a      First input tensor (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
-     * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a.
-     * @param[in]  c      Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
-     * @param[out] output Output tensor. Data type supported: same as @p a
-     * @param[in]  alpha  Weight of the matrix product
-     * @param[in]  beta   Weight of matrix C
+     * @param[in]  a         First input tensor (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
+     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a.
+     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
+     * @param[out] output    Output tensor. Data type supported: same as @p a
+     * @param[in]  alpha     Weight of the matrix product
+     * @param[in]  beta      Weight of matrix C
+     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+     *                       if the reshape of matrix B should happen only for the first run
      */
-    void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta);
+    void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());
 
     // Inherited methods overridden:
     void run() override;
@@ -83,6 +85,8 @@ private:
     CLTensor _tmp_b;
     bool _is_interleaved_transposed;
     bool _run_addition;
+    bool _is_first_run;
+    bool _reshape_b_only_on_first_run;
 };
 }
 
-- 
cgit v1.2.1
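
A minimal usage sketch of the extended CLGEMM interface follows. It is
illustrative only and not part of the patch: the tensor shapes are made up,
and it assumes the GEMMInfo constructor in arm_compute/core/Types.h takes the
flags (is_a_reshaped, is_b_reshaped, reshape_b_only_on_first_run).

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMM.h"

    using namespace arm_compute;

    int main()
    {
        // Initialise the OpenCL context/queue used by all CL functions.
        CLScheduler::get().default_init();

        // Illustrative shapes: A is 32x64 (MxK), B is 64x128 (KxN),
        // output is 32x128 (MxN). TensorShape takes width (columns)
        // first, then height (rows).
        CLTensor a{}, b{}, dst{};
        a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));

        CLGEMM gemm{};
        // Neither A nor B has been reshaped beforehand; request that any
        // reshape of B happen only on the first run (assumed flag order).
        gemm.configure(&a, &b, nullptr, &dst, 1.0f, 0.0f, GEMMInfo(false, false, true));

        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();

        gemm.run();
        CLScheduler::get().sync();
        return 0;
    }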