commit 1d25ed54a948639d1894c8b021940df70005d519
parent 57b20109108a90113d29d21ce7d3c873ff19749c
Author:    Gian Marco <gianmarco.iodice@arm.com>      2017-12-16 19:33:50 +0000
Committer: Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:42:33 +0000
COMPMID-759 - CLGEMM optimization for McVail benchmarks
This patch introduces an optimization for CLGEMM on Bifrost architectures
which can achieve up to 40% FMA utilization on config 3 of McVail. The new
CLGEMM does not require any reshape of matrix A and matrix B.

This patch also adds auto-configuration in CLConvolutionLayer and CLGEMM and
extends the interface for NEGEMM and CLGEMM.

Change-Id: Ibb354eda45e9ca64b14a99700fb21dff5989dda9
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/113716
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
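As a usage illustration (not part of the patch), below is a minimal sketch of
the extended CLGEMM interface. The tensor shapes, data type and the GEMMInfo
argument order are assumptions inferred from the new signature and its
documentation: A and B are not pre-reshaped, and B (e.g. constant weights)
is reshaped only on the first run.

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLGEMM.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Illustrative shapes: A is 32x64 (MxK), B is 64x128 (KxN),
        // output is 32x128 (MxN). ACL shapes are (width, height) = (cols, rows).
        CLTensor a{}, b{}, dst{};
        a.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));
        b.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));

        CLGEMM gemm{};
        // New optional argument (assumed order: is_a_reshaped, is_b_reshaped,
        // reshape_b_only_on_first_run): reshape B once and reuse it afterwards.
        gemm.configure(&a, &b, nullptr, &dst, 1.0f /* alpha */, 0.0f /* beta */,
                       GEMMInfo(false, false, true));

        a.allocator()->allocate();
        b.allocator()->allocate();
        dst.allocator()->allocate();

        gemm.run();                // first run: B is reshaped if the kernel needs it
        gemm.run();                // later runs: the reshape of B is skipped
        CLScheduler::get().sync(); // wait for the OpenCL queue to finish
        return 0;
    }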
Diffstat (limited to 'arm_compute/runtime/CL')
 arm_compute/runtime/CL/functions/CLConvolutionLayer.h |  3 +--
 arm_compute/runtime/CL/functions/CLGEMM.h             | 18 +++++++++++-------
 2 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index a8a04a0bbf..3fe6604db9 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -138,10 +138,9 @@ private:
     CLTensor _gemm_output;
     CLTensor _tmp_output;
-    bool     _append_bias;
-    bool     _is_fully_connected_convolution;
     bool     _are_weights_reshaped;
     bool     _is_quantized;
+    bool     _is_interleaved_transposed;
 };
 }
 #endif /* __ARM_COMPUTE_CLCONVOLUTIONLAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMM.h b/arm_compute/runtime/CL/functions/CLGEMM.h
index 2765b77b7d..bf41226bda 100644
--- a/arm_compute/runtime/CL/functions/CLGEMM.h
+++ b/arm_compute/runtime/CL/functions/CLGEMM.h
@@ -61,14 +61,16 @@ public:
      *
      * @note Whilst the first input tensor can be a vector, the second input tensor must be at least a matrix
      *
-     * @param[in]  a      First input tensor (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
-     * @param[in]  b      Second input tensor (Matrix B). Data type supported: same as @p a.
-     * @param[in]  c      Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
-     * @param[out] output Output tensor. Data type supported: same as @p a
-     * @param[in]  alpha  Weight of the matrix product
-     * @param[in]  beta   Weight of matrix C
+     * @param[in]  a         First input tensor (Matrix or Vector A). Data types supported: QS8/QS16/F16/F32
+     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a.
+     * @param[in]  c         Third input tensor (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a.
+     * @param[out] output    Output tensor. Data type supported: same as @p a
+     * @param[in]  alpha     Weight of the matrix product
+     * @param[in]  beta      Weight of matrix C
+     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
+     *                       if the reshape of matrix B should happen only for the first run
      */
-    void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta);
+    void configure(const ICLTensor *a, const ICLTensor *b, const ICLTensor *c, ICLTensor *output, float alpha, float beta, const GEMMInfo &gemm_info = GEMMInfo());

     // Inherited methods overridden:
     void run() override;
@@ -83,6 +85,8 @@ private:
     CLTensor _tmp_b;
     bool     _is_interleaved_transposed;
     bool     _run_addition;
+    bool     _is_first_run;
+    bool     _reshape_b_only_on_first_run;
 };
 }
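For intuition, here is a standalone sketch of how the two new members could
cooperate. The member names mirror the patch, but the control flow is an
assumption for illustration, not the library's actual run() body:

    #include <iostream>

    // Illustration only: reshape matrix B once, then reuse the reshaped copy.
    struct GemmRunSketch
    {
        bool _is_first_run{ true };
        bool _reshape_b_only_on_first_run{ true };

        void run()
        {
            if(_is_first_run || !_reshape_b_only_on_first_run)
            {
                std::cout << "reshape matrix B (interleave/transpose kernels)\n";
                _is_first_run = false;
            }
            std::cout << "run matrix multiply kernel\n";
        }
    };

    int main()
    {
        GemmRunSketch gemm{};
        gemm.run(); // prints both lines: B is reshaped on the first run
        gemm.run(); // prints only the multiply line: reshape is skipped
        return 0;
    }

When B holds constant weights (the convolution-layer case), skipping the
reshape on every run after the first avoids redundant work per inference.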